82#include "llvm/IR/IntrinsicsARM.h"
120#define DEBUG_TYPE "arm-isel"
126 "Number of constants with their storage promoted into constant pools");
130 cl::desc(
"Enable / disable ARM interworking (for debugging only)"),
135 cl::desc(
"Enable / disable promotion of unnamed_addr constants into "
140 cl::desc(
"Maximum size of constant to promote into a constant pool"),
144 cl::desc(
"Maximum size of ALL constants to promote into a constant pool"),
149 cl::desc(
"Maximum interleave factor for MVE VLDn to generate."),
154 ARM::R0, ARM::R1, ARM::R2, ARM::R3
215void ARMTargetLowering::addDRTypeForNEON(
MVT VT) {
220void ARMTargetLowering::addQRTypeForNEON(
MVT VT) {
225void ARMTargetLowering::setAllExpand(
MVT VT) {
238void ARMTargetLowering::addAllExtLoads(
const MVT From,
const MVT To,
239 LegalizeAction Action) {
245void ARMTargetLowering::addMVEVectorTypes(
bool HasMVEFP) {
466 for (
int LCID = 0;
LCID < RTLIB::UNKNOWN_LIBCALL; ++
LCID)
476 static const struct {
478 const char *
const Name;
500 { RTLIB::UO_F32,
"__unordsf2vfp",
ISD::SETNE },
509 { RTLIB::UO_F64,
"__unorddf2vfp",
ISD::SETNE },
552 static const struct {
554 const char *
const Name;
649 static const struct {
651 const char *
const Name;
672 static const struct {
674 const char *
const Name;
719 static const struct {
721 const char *
const Name;
788 if (Subtarget->
hasLOB()) {
1108 if (Subtarget->
hasDSP()) {
1208 HasStandaloneRem =
false;
1213 const char *
const Name;
1234 const char *
const Name;
1292 InsertFencesForAtomic =
false;
1306 InsertFencesForAtomic =
true;
1313 InsertFencesForAtomic =
true;
1333 if (!InsertFencesForAtomic) {
1580std::pair<const TargetRegisterClass *, uint8_t>
1593 RRC = &ARM::DPRRegClass;
1603 RRC = &ARM::DPRRegClass;
1607 RRC = &ARM::DPRRegClass;
1611 RRC = &ARM::DPRRegClass;
1615 return std::make_pair(RRC, Cost);
1619#define MAKE_CASE(V) \
1856 return &ARM::QQPRRegClass;
1858 return &ARM::QQQQPRRegClass;
1867 unsigned &PrefAlign)
const {
1885 unsigned NumVals =
N->getNumValues();
1890 EVT VT =
N->getValueType(
i);
1897 if (!
N->isMachineOpcode())
1922 return Const->getZExtValue() == 16;
1930 return Const->getZExtValue() == 16;
1938 return Const->getZExtValue() == 16;
1948 return isSHL16(Op.getOperand(0));
2007 bool isVarArg)
const {
2047 bool isVarArg)
const {
2048 return CCAssignFnForNode(CC,
false, isVarArg);
2052 bool isVarArg)
const {
2053 return CCAssignFnForNode(CC,
true, isVarArg);
2060 bool isVarArg)
const {
2061 switch (getEffectiveCallingConv(CC, isVarArg)) {
2112SDValue ARMTargetLowering::LowerCallResult(
2124 for (
unsigned i = 0;
i !=
RVLocs.size(); ++
i) {
2131 "unexpected return calling convention register assignment");
2137 if (
VA.needsCustom() &&
2142 Chain =
Lo.getValue(1);
2147 Chain =
Hi.getValue(1);
2160 Chain =
Lo.getValue(1);
2164 Chain =
Hi.getValue(1);
2179 switch (
VA.getLocInfo()) {
2190 if (
VA.needsCustom() &&
2192 Val = MoveToHPR(dl, DAG,
VA.getLocVT(),
VA.getValVT(), Val);
2194 InVals.push_back(Val);
2200std::pair<SDValue, MachinePointerInfo> ARMTargetLowering::computeAddrForCallArg(
2202 bool IsTailCall,
int SPDiff)
const {
2205 int32_t
Offset =
VA.getLocMemOffset();
2211 int Size =
VA.getLocVT().getFixedSizeInBits() / 8;
2237 unsigned id = Subtarget->
isLittle() ? 0 : 1;
2251 computeAddrForCallArg(dl, DAG,
NextVA, StackPtr, IsTailCall,
SPDiff);
2283 bool isStructRet = (Outs.empty()) ?
false : Outs[0].Flags.isSRet();
2312 return isa<Instruction>(U) &&
2313 cast<Instruction>(U)->getParent() == BB;
2319 isTailCall = IsEligibleForTailCallOptimization(
2336 "site marked musttail");
2344 unsigned NumBytes = CCInfo.getNextStackOffset();
2360 NumBytes =
alignTo(NumBytes, StackAlign);
2403 bool isByVal = Flags.isByVal();
2406 switch (
VA.getLocInfo()) {
2431 if (
VA.needsCustom() &&
2433 Arg = MoveFromHPR(dl, DAG,
VA.getLocVT(),
VA.getValVT(),
Arg);
2439 auto LocBits =
VA.getLocVT().getSizeInBits();
2460 if (
VA.isRegLoc()) {
2468 computeAddrForCallArg(dl, DAG,
VA, StackPtr, isTailCall,
SPDiff);
2471 }
else if (
VA.needsCustom() &&
VA.getLocVT() ==
MVT::f64) {
2474 }
else if (
VA.isRegLoc()) {
2475 if (
realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
2478 "unexpected calling convention register assignment");
2480 "unexpected use of 'returned'");
2485 CSInfo.emplace_back(
VA.getLocReg(),
i);
2487 }
else if (isByVal) {
2489 unsigned offset = 0;
2494 unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();
2518 CCInfo.nextInRegsParam();
2521 if (Flags.getByValSize() > 4*offset) {
2526 computeAddrForCallArg(dl, DAG,
VA, StackPtr, isTailCall,
SPDiff);
2544 computeAddrForCallArg(dl, DAG,
VA, StackPtr, isTailCall,
SPDiff);
2572 GV =
G->getGlobal();
2582 "long-calls codegen is not position independent!");
2599 const char *Sym = S->getSymbol();
2605 ARMPCLabelIndex, 0);
2633 "Windows is the only supported COFF target");
2637 else if (!
TM.shouldAssumeDSOLocal(*GV->
getParent(), GV))
2653 const char *Sym = S->getSymbol();
2658 ARMPCLabelIndex, 4);
2673 "Cannot handle call to ARM function or direct call");
2676 "call to non-secure function would "
2677 "require passing arguments on stack",
2684 "call to non-secure function would return value through pointer",
2721 std::vector<SDValue> Ops;
2722 Ops.push_back(Chain);
2741 Mask =
ARI->getThisReturnPreservedMask(MF, CallConv);
2747 Mask =
ARI->getCallPreservedMask(MF, CallConv);
2750 Mask =
ARI->getCallPreservedMask(MF, CallConv);
2752 assert(Mask &&
"Missing call preserved mask for calling convention");
2788 return LowerCallResult(Chain,
InFlag, CallConv, isVarArg, Ins, dl, DAG,
2797void ARMTargetLowering::HandleByVal(
CCState *State,
unsigned &
Size,
2798 Align Alignment)
const {
2800 Alignment = std::max(Alignment,
Align(4));
2802 unsigned Reg = State->AllocateReg(
GPRArgRegs);
2808 for (
unsigned i = 0;
i <
Waste; ++
i)
2814 unsigned Excess = 4 * (ARM::R4 -
Reg);
2820 const unsigned NSAAOffset = State->getNextStackOffset();
2844 Size = std::max<int>(
Size - Excess, 0);
2854 unsigned Bytes =
Arg.getValueSizeInBits() / 8;
2855 int FI = std::numeric_limits<int>::max();
2863 if (!Flags.isByVal()) {
2870 if (Flags.isByVal())
2885 assert(FI != std::numeric_limits<int>::max());
2894bool ARMTargetLowering::IsEligibleForTailCallOptimization(
2900 const bool isIndirect)
const {
2920 if (
CallerF.hasFnAttribute(
"interrupt"))
2942 (!
TT.isOSWindows() ||
TT.isOSBinFormatELF() ||
TT.isOSBinFormatMachO()))
2949 getEffectiveCallingConv(
CalleeCC, isVarArg),
2972 if (!Outs.empty()) {
2978 if (CCInfo.getNextStackOffset()) {
2988 EVT RegVT =
VA.getLocVT();
3008 }
else if (!
VA.isRegLoc()) {
3059 "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");
3086 bool isLittleEndian = Subtarget->
isLittle();
3098 "secure entry function would return value through pointer",
3108 assert(
VA.isRegLoc() &&
"Can only return in registers!");
3136 switch (
VA.getLocInfo()) {
3149 Arg = MoveFromHPR(dl, DAG,
VA.getLocVT(),
VA.getValVT(),
Arg);
3151 auto LocBits =
VA.getLocVT().getSizeInBits();
3161 if (
VA.needsCustom() &&
3192 fmrrd.getValue(isLittleEndian ? 0 : 1),
Flag);
3197 fmrrd.getValue(isLittleEndian ? 1 : 0),
Flag);
3244bool ARMTargetLowering::isUsedByReturnOnly(
SDNode *
N,
SDValue &Chain)
const {
3245 if (
N->getNumValues() != 1)
3247 if (!
N->hasNUsesOfValue(1, 0))
3281 if (UI->getOperand(UI->getNumOperands()-1).getValueType() ==
MVT::Glue)
3289 if (!
Copy->hasOneUse())
3319bool ARMTargetLowering::mayBeEmittedAsTailCall(
const CallInst *CI)
const {
3337 &&
"LowerWRITE_REGISTER called for non-i64 type argument.");
3343 SDValue Ops[] = { Op->getOperand(0), Op->getOperand(1), Lo, Hi };
3367 auto T =
const_cast<Type*
>(
CP->getType());
3368 auto C =
const_cast<Constant*
>(
CP->getConstVal());
3379 return LowerGlobalAddress(
GA, DAG);
3382 if (
CP->isMachineConstantPoolEntry())
3398 unsigned ARMPCLabelIndex = 0;
3449ARMTargetLowering::LowerGlobalTLSAddressDarwin(
SDValue Op,
3452 "This function expects a Darwin target");
3493ARMTargetLowering::LowerGlobalTLSAddressWindows(
SDValue Op,
3576 Args.push_back(Entry);
3617 Chain =
Offset.getValue(1);
3649 return LowerGlobalTLSAddressDarwin(Op, DAG);
3652 return LowerGlobalTLSAddressWindows(Op, DAG);
3661 return LowerToTLSGeneralDynamicModel(
GA, DAG);
3664 return LowerToTLSExecModels(
GA, DAG,
model);
3673 while (!Worklist.empty()) {
3674 auto *U = Worklist.pop_back_val();
3681 if (!
I ||
I->getParent()->getParent() !=
F)
3710 if (!
GVar || !
GVar->hasInitializer() ||
3711 !
GVar->isConstant() || !
GVar->hasGlobalUnnamedAddr() ||
3712 !
GVar->hasLocalLinkage())
3717 auto *
Init =
GVar->getInitializer();
3719 Init->needsDynamicRelocation())
3785 if (!(GV =
GA->getBaseObject()))
3788 return V->isConstant();
3797 return LowerGlobalAddressWindows(Op, DAG);
3799 return LowerGlobalAddressELF(Op, DAG);
3801 return LowerGlobalAddressDarwin(Op, DAG);
3874 "ROPI/RWPI not currently supported for Darwin");
3900 "Windows on ARM expects to use movw/movt");
3902 "ROPI/RWPI not currently supported for Windows");
3909 else if (!
TM.shouldAssumeDSOLocal(*GV->
getParent(), GV))
3934 Op.getOperand(1), Val);
3944SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(
SDValue Op,
3951SDValue ARMTargetLowering::LowerINTRINSIC_VOID(
3960 case Intrinsic::arm_gnu_eabi_mcount: {
3969 assert(Mask &&
"Missing call preserved mask for calling convention");
3982 {ReturnAddress, DAG.getTargetConstant(ARMCC::AL, dl, PtrVT),
3983 DAG.getRegister(0, PtrVT), Callee, RegisterMask, Chain}),
3987 {ReturnAddress, Callee, RegisterMask, Chain}),
4000 case Intrinsic::thread_pointer: {
4004 case Intrinsic::arm_cls: {
4005 const SDValue &Operand =
Op.getOperand(1);
4006 const EVT VTy =
Op.getValueType();
4017 case Intrinsic::arm_cls64: {
4020 const SDValue &Operand =
Op.getOperand(1);
4021 const EVT VTy =
Op.getValueType();
4047 case Intrinsic::eh_sjlj_lsda: {
4070 case Intrinsic::arm_neon_vabs:
4073 case Intrinsic::arm_neon_vmulls:
4074 case Intrinsic::arm_neon_vmullu: {
4075 unsigned NewOpc = (
IntNo == Intrinsic::arm_neon_vmulls)
4078 Op.getOperand(1),
Op.getOperand(2));
4080 case Intrinsic::arm_neon_vminnm:
4081 case Intrinsic::arm_neon_vmaxnm: {
4082 unsigned NewOpc = (
IntNo == Intrinsic::arm_neon_vminnm)
4085 Op.getOperand(1),
Op.getOperand(2));
4087 case Intrinsic::arm_neon_vminu:
4088 case Intrinsic::arm_neon_vmaxu: {
4089 if (
Op.getValueType().isFloatingPoint())
4091 unsigned NewOpc = (
IntNo == Intrinsic::arm_neon_vminu)
4094 Op.getOperand(1),
Op.getOperand(2));
4096 case Intrinsic::arm_neon_vmins:
4097 case Intrinsic::arm_neon_vmaxs: {
4099 if (!
Op.getValueType().isFloatingPoint()) {
4100 unsigned NewOpc = (
IntNo == Intrinsic::arm_neon_vmins)
4103 Op.getOperand(1),
Op.getOperand(2));
4105 unsigned NewOpc = (
IntNo == Intrinsic::arm_neon_vmins)
4108 Op.getOperand(1),
Op.getOperand(2));
4110 case Intrinsic::arm_neon_vtbl1:
4112 Op.getOperand(1),
Op.getOperand(2));
4113 case Intrinsic::arm_neon_vtbl2:
4115 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
4116 case Intrinsic::arm_mve_pred_i2v:
4117 case Intrinsic::arm_mve_pred_v2i:
4120 case Intrinsic::arm_mve_vreinterpretq:
4123 case Intrinsic::arm_mve_lsll:
4125 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
4126 case Intrinsic::arm_mve_asrl:
4128 Op.getOperand(1),
Op.getOperand(2),
Op.getOperand(3));
4145 "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
4175 return Op.getOperand(0);
4182 return Op.getOperand(0);
4187 isRead = ~isRead & 1;
4188 isData = ~isData & 1;
4206 return DAG.
getStore(Op.getOperand(0), dl,
FR, Op.getOperand(1),
4214 const SDLoc &dl)
const {
4220 RC = &ARM::tGPRRegClass;
4222 RC = &ARM::GPRRegClass;
4322 int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain,
nullptr,
4329bool ARMTargetLowering::splitValueIntoRegisterParts(
4347SDValue ARMTargetLowering::joinRegisterPartsIntoValue(
4365SDValue ARMTargetLowering::LowerFormalArguments(
4396 for (
unsigned i = 0, e =
ArgLocs.size();
i !=
e; ++
i) {
4401 unsigned Index =
VA.getValNo();
4403 if (!Flags.isByVal())
4406 assert(
VA.isMemLoc() &&
"unexpected byval pointer in reg");
4426 for (
unsigned i = 0, e =
ArgLocs.size();
i !=
e; ++
i) {
4428 if (Ins[
VA.getValNo()].isOrigArg()) {
4430 Ins[
VA.getValNo()].getOrigArgIndex() -
CurArgIdx);
4434 if (
VA.isRegLoc()) {
4435 EVT RegVT =
VA.getLocVT();
4441 GetF64FormalArgument(
VA,
ArgLocs[++
i], Chain, DAG, dl);
4444 if (
VA.isMemLoc()) {
4458 }
else if (
VA.needsCustom() &&
VA.getLocVT() ==
MVT::f64) {
4459 ArgValue = GetF64FormalArgument(
VA,
ArgLocs[++
i], Chain, DAG, dl);
4464 RC = &ARM::HPRRegClass;
4466 RC = &ARM::SPRRegClass;
4469 RC = &ARM::DPRRegClass;
4472 RC = &ARM::QPRRegClass;
4475 : &ARM::GPRRegClass;
4485 if (
VA.getLocReg() == ARM::R0 && Ins[
VA.getValNo()].Flags.isReturned()) {
4493 switch (
VA.getLocInfo()) {
4514 if (
VA.needsCustom() &&
4516 ArgValue = MoveToHPR(dl, DAG,
VA.getLocVT(),
VA.getValVT(), ArgValue);
4518 InVals.push_back(ArgValue);
4524 int index =
VA.getValNo();
4536 if (Flags.isByVal()) {
4537 assert(Ins[index].isOrigArg() &&
4538 "Byval arguments cannot be implicit");
4543 VA.getLocMemOffset(), Flags.getByValSize());
4553 InVals.push_back(DAG.
getLoad(
VA.getValVT(), dl, Chain,
FIN,
4569 "secure entry function must not be variadic", dl.
getDebugLoc());
4589 "secure entry function requires arguments on stack", dl.
getDebugLoc());
4599 return CFP->getValueAPF().isPosZero();
4606 return CFP->getValueAPF().isPosZero();
4624 const SDLoc &dl)
const {
4626 unsigned C =
RHSC->getZExtValue();
4696 uint64_t
RHSV =
RHSC->getZExtValue();
4697 if (
isMask_32(Mask) && (
RHSV & ~Mask) == 0 && Mask != 255 && Mask != 65535) {
4770 dl,
MVT::Glue, LHS, RHS);
4773 dl,
MVT::Glue, LHS);
4781 unsigned Opc =
Cmp.getOpcode();
4788 Opc =
Cmp.getOpcode();
4802std::pair<SDValue, SDValue>
4817 switch (
Op.getOpcode()) {
4886 EVT VT =
Op.getValueType();
4905 return Carry.getValue(1);
4929 EVT VT =
Op.getValueType();
4933 switch (
Op.getOpcode()) {
4958 EVT VT = Op.getValueType();
4969 switch (Op->getOpcode()) {
4985 switch (Op->getOpcode()) {
5015 unsigned Opc =
Cond.getOpcode();
5017 if (
Cond.getResNo() == 1 &&
5027 EVT VT =
Op.getValueType();
5059 EVT VT =
Op.getValueType();
5064 return getCMOV(dl, VT, True, False,
ARMcc,
CCR, Cmp, DAG);
5146 ARMcc,
CCR, duplicateCmp(Cmp, DAG));
5173 ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) ||
5175 ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal)));
5196 EVT VT = Op.getValueType();
5231 int64_t
PosVal = std::max(Val1, Val2);
5232 int64_t
NegVal = std::min(Val1, Val2);
5264 SDValue LHS = Op.getOperand(0);
5265 SDValue RHS = Op.getOperand(1);
5267 SDValue TrueVal = Op.getOperand(2);
5268 SDValue FalseVal = Op.getOperand(3);
5279 V = (
KTmp == TrueVal) ? FalseVal : TrueVal;
5295bool ARMTargetLowering::isUnsupportedFloatingType(
EVT VT)
const {
5306 EVT VT =
Op.getValueType();
5346 unsigned Opcode = 0;
5386 return DAG.
getNode(Opcode, dl, VT, TrueVal, FalseVal,
ARMcc, Cmp);
5390 if (isUnsupportedFloatingType(LHS.getValueType())) {
5392 DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS);
5396 if (!RHS.getNode()) {
5402 if (LHS.getValueType() ==
MVT::i32) {
5430 return getCMOV(dl, VT, FalseVal, TrueVal,
ARMcc,
CCR, Cmp, DAG);
5465 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
5476 if (!
N->hasOneUse())
5479 if (!
N->getNumValues())
5481 EVT VT = Op.getValueType();
5500 Ld->getPointerInfo(),
Ld->getAlignment(),
5501 Ld->getMemOperand()->getFlags());
5520 Ld->getAlignment(),
Ld->getMemOperand()->getFlags());
5527 Ld->getPointerInfo().getWithOffset(4),
NewAlign,
5528 Ld->getMemOperand()->getFlags());
5561 if (LHS.getValueType() ==
MVT::f32) {
5596 unsigned Opc =
Cond.getOpcode();
5599 if (
Cond.getResNo() == 1 &&
5633 if (isUnsupportedFloatingType(LHS.getValueType())) {
5635 DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS);
5639 if (!RHS.getNode()) {
5647 unsigned Opc = LHS.getOpcode();
5676 if (LHS.getValueType() ==
MVT::i32) {
5687 if (
SDValue Result = OptimizeVFPBrcond(Op, DAG))
5726 Addr,
Op.getOperand(2), JTI);
5732 Chain =
Addr.getValue(1);
5739 Chain =
Addr.getValue(1);
5745 EVT VT = Op.getValueType();
5748 if (Op.getValueType().getVectorElementType() ==
MVT::i32) {
5749 if (Op.getOperand(0).getValueType().getVectorElementType() ==
MVT::f32)
5754 const bool HasFullFP16 =
5758 const EVT OpTy = Op.getOperand(0).getValueType();
5771 Op = DAG.
getNode(Op.getOpcode(), dl,
NewTy, Op.getOperand(0));
5776 EVT VT =
Op.getValueType();
5783 if (isUnsupportedFloatingType(
SrcVal.getValueType())) {
5815 EVT VT = Op.getValueType();
5818 if (Op.getOperand(0).getValueType().getVectorElementType() ==
MVT::i32) {
5825 Op.getOperand(0).getValueType() ==
MVT::v8i16) &&
5826 "Invalid type for custom lowering!");
5828 const bool HasFullFP16 =
5843 switch (Op.getOpcode()) {
5856 return DAG.
getNode(Opc, dl, VT, Op);
5860 EVT VT =
Op.getValueType();
5863 if (isUnsupportedFloatingType(VT)) {
5884 EVT VT =
Op.getValueType();
5969 EVT VT =
Op.getValueType();
5992 EVT VT =
Op.getValueType();
6008 .Case(
"sp", ARM::SP)
6024 &&
"ExpandREAD_REGISTER called for non-i64 type result.");
6033 Results.push_back(Read.getOperand(0));
6135 SrcVT.getVectorNumElements() > 1)
6168 assert(
Op.getNumOperands() == 3 &&
"Not a double-shift!");
6169 EVT VT =
Op.getValueType();
6212 assert(
Op.getNumOperands() == 3 &&
"Not a double-shift!");
6213 EVT VT =
Op.getValueType();
6260 Chain =
FPSCR.getValue(1);
6299 Chain =
FPSCR.getValue(1);
6315 EVT VT =
N->getValueType(0);
6316 if (VT.
isVector() && ST->hasNEON()) {
6336 unsigned NumBits = ElemTy.getSizeInBits();
6361 if (!ST->hasV6T2Ops())
6370 EVT VT =
N->getValueType(0);
6373 assert(ST->hasNEON() &&
"Custom ctpop lowering requires NEON.");
6376 "Unexpected type for custom ctpop lowering");
6384 unsigned EltSize = 8;
6388 Ops.push_back(DAG.
getConstant(Intrinsic::arm_neon_vpaddlu,
DL,
6407 Op = Op.getOperand(0);
6409 APInt SplatBits, SplatUndef;
6410 unsigned SplatBitSize;
6413 !
BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
HasAnyUndefs,
6426 assert(VT.
isVector() &&
"vector shift count is not a vector type");
6441 assert(VT.
isVector() &&
"vector shift count is not a vector type");
6456 EVT VT =
N->getValueType(0);
6478 "unexpected vector shift opcode");
6480 if (
isVShiftRImm(
N->getOperand(1), VT,
false,
false, Cnt)) {
6499 EVT VT =
N->getValueType(0);
6508 "Unknown shift to lower!");
6510 unsigned ShOpc =
N->getOpcode();
6511 if (ST->hasMVEIntegerOps()) {
6519 (
Con && (
Con->getZExtValue() == 0 ||
Con->getZExtValue() >= 32)))
6549 Hi =
SDValue(Lo.getNode(), 1);
6558 if (ST->isThumb1Only())
6585 SDValue Op0 = Op.getOperand(0);
6586 SDValue Op1 = Op.getOperand(1);
6587 SDValue CC = Op.getOperand(2);
6588 EVT VT = Op.getValueType();
6596 assert(ST->hasMVEIntegerOps() &&
6597 "No hardware support for integer vector comparison!");
6599 if (Op.getValueType().getVectorElementType() !=
MVT::i1)
6638 if (ST->hasMVEFloatOps()) {
6666 Result = DAG.
getNOT(dl, Result, VT);
6678 Result = DAG.
getNOT(dl, Result, VT);
6687 if (ST->hasMVEIntegerOps()) {
6704 if (ST->hasNEON() && Opc ==
ARMCC::EQ) {
6712 if (AndOp.getNode() && AndOp.getOpcode() ==
ISD::BITCAST)
6715 if (AndOp.getNode() && AndOp.getOpcode() ==
ISD::AND) {
6720 Result = DAG.
getNOT(dl, Result, VT);
6754 Result = DAG.
getNOT(dl, Result, VT);
6760 SDValue LHS = Op.getOperand(0);
6761 SDValue RHS = Op.getOperand(1);
6766 assert(LHS.getSimpleValueType().isInteger() &&
"SETCCCARRY is integer only.");
6807 switch (SplatBitSize) {
6812 assert((SplatBits & ~0xff) == 0 &&
"one byte splat value is too big");
6821 if ((SplatBits & ~0xff) == 0) {
6827 if ((SplatBits & ~0xff00) == 0) {
6830 Imm = SplatBits >> 8;
6841 if ((SplatBits & ~0xff) == 0) {
6847 if ((SplatBits & ~0xff00) == 0) {
6850 Imm = SplatBits >> 8;
6853 if ((SplatBits & ~0xff0000) == 0) {
6856 Imm = SplatBits >> 16;
6859 if ((SplatBits & ~0xff000000) == 0) {
6862 Imm = SplatBits >> 24;
6869 if ((SplatBits & ~0xffff) == 0 &&
6870 ((SplatBits | SplatUndef) & 0xff) == 0xff) {
6873 Imm = SplatBits >> 8;
6881 if ((SplatBits & ~0xffffff) == 0 &&
6882 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
6885 Imm = SplatBits >> 16;
6900 uint64_t BitMask = 0xff;
6901 unsigned ImmMask = 1;
6904 if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
6906 }
else if ((SplatBits & BitMask) != 0) {
6942 EVT VT =
Op.getValueType();
6949 if (
ST->genExecuteOnly()) {
6971 if (!
ST->hasVFP3Base())
6983 if (
IsDouble || !
ST->useNEONForSinglePrecisionFP()) {
7001 if (!
ST->hasNEON() || (!
IsDouble && !
ST->useNEONForSinglePrecisionFP()))
7005 uint64_t
iVal =
FPVal.bitcastToAPInt().getZExtValue();
7072 if (M[
i] < 0)
continue;
7104 if (M[
i] < 0)
continue;
7120 return VT ==
MVT::v8i8 && M.size() == 8;
7125 if (Mask.size() == Elements * 2)
7126 return Index / Elements;
7127 return Mask[
Index] == 0 ? 0 : 1;
7165 for (
unsigned i = 0;
i < M.size();
i +=
NumElts) {
7192 for (
unsigned i = 0;
i < M.size();
i +=
NumElts) {
7224 for (
unsigned i = 0;
i < M.size();
i +=
NumElts) {
7255 for (
unsigned i = 0;
i < M.size();
i +=
NumElts) {
7257 for (
unsigned j = 0;
j <
NumElts;
j += Half) {
7259 for (
unsigned k = 0;
k < Half; ++
k) {
7295 for (
unsigned i = 0;
i < M.size();
i +=
NumElts) {
7299 if ((M[
i+
j] >= 0 && (
unsigned) M[
i+
j] !=
Idx) ||
7328 for (
unsigned i = 0;
i < M.size();
i +=
NumElts) {
7332 if ((M[
i+
j] >= 0 && (
unsigned) M[
i+
j] !=
Idx) ||
7333 (M[
i+
j+1] >= 0 && (
unsigned) M[
i+
j+1] !=
Idx))
7382 if (M[
i] >= 0 && M[
i] != (
int) (
NumElts - 1 -
i))
7400 unsigned Offset = Top ? 0 : 1;
7403 if (M[
i] >= 0 && M[
i] != (
int)
i)
7405 if (M[
i + 1] >= 0 && M[
i + 1] != (
int)(
N +
i +
Offset))
7425 if (M[
i] >= 0 && M[
i] != (
int)(
Off0 +
i / 2))
7427 if (M[
i + 1] >= 0 && M[
i + 1] != (
int)(
Off1 +
i / 2))
7443 if (!ST->hasMVEFloatOps())
7447 EVT VT =
BV.getValueType();
7457 BV.getOperand(0).getOperand(0).getConstantOperandVal(1) != 0)
7461 BV.getOperand(1).getOperand(0).getConstantOperandVal(1) != 0)
7463 SDValue Op0 =
BV.getOperand(0).getOperand(0).getOperand(0);
7464 SDValue Op1 =
BV.getOperand(1).getOperand(0).getOperand(0);
7469 for (
unsigned i = 1;
i < 4;
i++) {
7476 if (!Check(
BV.getOperand(
i * 2 + 0), Op0,
i))
7478 if (!Check(
BV.getOperand(
i * 2 + 1), Op1,
i))
7496 if (!ST->hasMVEFloatOps())
7500 EVT VT =
BV.getValueType();
7511 SDValue Op0 =
BV.getOperand(0).getOperand(0).getOperand(0);
7512 int Offset =
BV.getOperand(0).getOperand(0).getConstantOperandVal(1);
7517 for (
unsigned i = 1;
i < 4;
i++) {
7524 if (!Check(
BV.getOperand(
i), Op0, 2 *
i +
Offset))
7542 if (ST->isThumb1Only()) {
7543 if (Val <= 255 || ~Val <= 255)
7555 EVT VT = Op.getValueType();
7557 assert(ST->hasMVEIntegerOps() &&
"LowerBUILD_VECTOR_i1 called without MVE!");
7576 SDValue FirstOp = Op.getOperand(0);
7578 std::all_of(std::next(Op->op_begin()), Op->op_end(),
7579 [&FirstOp](
SDUse &U) {
7580 return U.get().isUndef() || U.get() == FirstOp;
7614 if (!ST->hasMVEIntegerOps())
7618 EVT VT = Op.getValueType();
7619 SDValue Op0 = Op.getOperand(0);
7623 SDValue Op1 = Op.getOperand(1);
7628 if (
N != 1 &&
N != 2 &&
N != 4 &&
N != 8)
7651 EVT VT =
Op.getValueType();
7659 APInt SplatBits, SplatUndef;
7660 unsigned SplatBitSize;
7662 if (
BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
HasAnyUndefs)) {
7666 if ((
ST->hasNEON() && SplatBitSize <= 64) ||
7667 (
ST->hasMVEIntegerOps() && SplatBitSize <= 64)) {
7680 uint64_t
NegatedImm = (~SplatBits).getZExtValue();
7700 if (
ST->hasMVEIntegerOps() &&
7701 (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32)) {
7738 ValueCounts.insert(std::make_pair(V, 0));
7739 unsigned &Count = ValueCounts[V];
7742 if (++Count > (
NumElts / 2)) {
7747 if (ValueCounts.size() != 1)
7749 if (!
Value.getNode() && !ValueCounts.empty())
7750 Value = ValueCounts.begin()->first;
7752 if (ValueCounts.empty())
7780 if (VT !=
Value->getOperand(0).getValueType()) {
7781 unsigned index =
constIndex->getAPIntValue().getLimitedValue() %
7801 Ops.push_back(
Op.getOperand(
I));
7818 Val = LowerBUILD_VECTOR(Val, DAG, ST);
7870 if (EltSize >= 32) {
7909 EVT VT =
Op.getValueType();
7914 unsigned MinElt = std::numeric_limits<unsigned>::max();
7952 if (Source == Sources.end())
7963 if (Sources.size() > 2)
7969 for (
auto &Source : Sources) {
7982 for (
auto &Src : Sources) {
8038 Src.WindowBase = -Src.MinElt;
8045 for (
auto &Src : Sources) {
8052 Src.WindowBase *= Src.WindowScale;
8065 if (Entry.isUndef())
8068 auto Src =
llvm::find(Sources, Entry.getOperand(0));
8074 EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
8092 assert(Sources.size() <= 2 &&
"Too many sources!");
8095 for (
unsigned i = 0;
i < Sources.size(); ++
i)
8124 unsigned OpNum = (
PFEntry >> 26) & 0x0F;
8145 for (
unsigned i = 0;
i != 4; ++
i) {
8156 unsigned Cost = (
PFEntry >> 30);
8166 if (EltSize >= 32 ||
8173 else if (Subtarget->
hasNEON() &&
8194 unsigned OpNum = (
PFEntry >> 26) & 0x0F;
8199 if (
LHSID == (1*9+2)*9+3)
return LHS;
8200 assert(
LHSID == ((4*9+5)*9+6)*9+7 &&
"Illegal OP_COPY!");
8255 SDValue V2 = Op.getOperand(1);
8260 I = ShuffleMask.begin(),
E = ShuffleMask.end();
I !=
E; ++
I)
8263 if (V2.getNode()->isUndef())
8278 "Expect an v8i16/v16i8 type");
8338 EVT VT = Op.getValueType();
8342 assert(ST->hasMVEIntegerOps() &&
8343 "No support for vector shuffle of boolean predicates");
8383 EVT VT = Op.getValueType();
8388 "Unexpected vector type");
8399 for (
int i = 0;
i < Length;
i++) {
8400 if (ShuffleMask[Start +
i] >= 0) {
8401 if (ShuffleMask[Start +
i] % Length !=
i)
8403 MovIdx = ShuffleMask[Start +
i] / Length;
8411 for (
int i = 1;
i < Length;
i++) {
8412 if (ShuffleMask[Start +
i] >= 0 &&
8413 (ShuffleMask[Start +
i] / Length !=
MovIdx ||
8414 ShuffleMask[Start +
i] % Length !=
i))
8420 for (
int Part = 0; Part < 4; ++Part) {
8424 SDValue Input = Op->getOperand(0);
8426 Input = Op->getOperand(1);
8436 if (!Parts[0] && !Parts[1] && !Parts[2] && !Parts[3])
8441 if (!Parts[0] || !Parts[1] || !Parts[2] || !Parts[3]) {
8443 for (
int Part = 0; Part < 4; ++Part)
8446 Parts[Part] ? -1 : ShuffleMask[Part *
QuarterSize +
i]);
8451 for (
int Part = 0; Part < 4; ++Part)
8466 SDValue V2 = Op.getOperand(1);
8467 EVT VT = Op.getValueType();
8483 if (Mask[
i] !=
i + BaseOffset) {
8516 SDValue V2 = Op.getOperand(1);
8518 EVT VT = Op.getValueType();
8522 if (ST->hasMVEIntegerOps() && EltSize == 1)
8533 if (EltSize <= 32) {
8534 if (
SVN->isSplat()) {
8535 int Lane =
SVN->getSplatIndex();
8537 if (Lane == -1) Lane = 0;
8549 for (
unsigned i = 1, e =
V1.getNumOperands();
i != e; ++
i)
8550 if (!
V1.getOperand(
i).isUndef()) {
8589 if (ST->hasNEON()) {
8598 if (ST->hasMVEIntegerOps()) {
8633 }) &&
"Unexpected shuffle index into UNDEF operand!");
8640 "In-place shuffle of concat can only have one result!");
8649 if (ST->hasMVEIntegerOps() && EltSize <= 32)
8658 for (
unsigned i = 0;
i != 4; ++
i) {
8659 if (ShuffleMask[
i] < 0)
8669 unsigned Cost = (
PFEntry >> 30);
8686 if (EltSize >= 32) {
8695 if (ShuffleMask[
i] < 0)
8714 if (ST->hasMVEIntegerOps())
8723 EVT VecVT = Op.getOperand(0).getValueType();
8726 assert(ST->hasMVEIntegerOps() &&
8727 "LowerINSERT_VECTOR_ELT_i1 called without MVE!");
8753 Op.getValueType().getScalarSizeInBits() == 1)
8786 EVT VecVT = Op.getOperand(0).getValueType();
8789 assert(ST->hasMVEIntegerOps() &&
8790 "LowerINSERT_VECTOR_ELT_i1 called without MVE!");
8805 SDValue Lane = Op.getOperand(1);
8809 SDValue Vec = Op.getOperand(0);
8826 SDValue V2 = Op.getOperand(1);
8828 EVT VT = Op.getValueType();
8835 "Unexpected custom CONCAT_VECTORS lowering");
8836 assert(ST->hasMVEIntegerOps() &&
8837 "CONCAT_VECTORS lowering only supported for MVE");
8857 for (
unsigned i = 0, e =
NewVT.getVectorNumElements();
i < e;
i++,
j++) {
8877 EVT VT = Op->getValueType(0);
8883 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
8884 "unexpected CONCAT_VECTORS");
8887 SDValue Op0 = Op.getOperand(0);
8888 SDValue Op1 = Op.getOperand(1);
8903 SDValue V2 = Op.getOperand(1);
8905 EVT VT = Op.getValueType();
8911 "Unexpected custom EXTRACT_SUBVECTOR lowering");
8912 assert(ST->hasMVEIntegerOps() &&
8913 "EXTRACT_SUBVECTOR lowering only supported for MVE");
8924 for (
unsigned i = Index,
j = 0;
i < (Index +
NumElts);
i++,
j++) {
8940 assert(ST->hasMVEIntegerOps() &&
"Expected MVE!");
8941 EVT VT =
N->getValueType(0);
8943 "Expected a vector i1 type!");
9004 EVT FromVT =
N->getOperand(0).getValueType();
9053 EVT VT =
N->getValueType(0);
9068 if (
Hi0->getSExtValue() ==
Lo0->getSExtValue() >> 32 &&
9069 Hi1->getSExtValue() ==
Lo1->getSExtValue() >> 32)
9072 if (
Hi0->isNullValue() &&
Hi1->isNullValue())
9081 for (
unsigned i = 0, e =
N->getNumOperands();
i != e; ++
i) {
9123 if (
OrigVT.getSizeInBits() >= 64)
9126 assert(
OrigVT.isSimple() &&
"Expecting a simple value type");
9169 return DAG.
getLoad(LD->getMemoryVT(),
SDLoc(LD), LD->getChain(),
9170 LD->getBasePtr(), LD->getPointerInfo(),
9171 LD->getAlignment(), LD->getMemOperand()->getFlags());
9177 LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(),
9178 LD->getMemoryVT(), LD->getAlignment(),
9179 LD->getMemOperand()->getFlags());
9198 "Expected extending load");
9215 BVN->getValueType(0) ==
MVT::v4i32 &&
"expected v4i32 BUILD_VECTOR");
9223 EVT VT =
N->getValueType(0);
9240 unsigned Opcode =
N->getOpcode();
9243 SDNode *
N1 =
N->getOperand(1).getNode();
9251 unsigned Opcode =
N->getOpcode();
9254 SDNode *
N1 =
N->getOperand(1).getNode();
9264 EVT VT = Op.getValueType();
9266 "unexpected type for custom-lowering ISD::MUL");
9267 SDNode *N0 = Op.getOperand(0).getNode();
9268 SDNode *
N1 = Op.getOperand(1).getNode();
9314 "unexpected types for extended operands to VMULL");
9408 EVT VT = Op.getValueType();
9410 "unexpected type for custom-lowering ISD::SDIV");
9413 SDValue N0 = Op.getOperand(0);
9445 EVT VT = Op.getValueType();
9447 "unexpected type for custom-lowering ISD::UDIV");
9450 SDValue N0 = Op.getOperand(0);
9521 EVT VT =
N->getValueType(0);
9535 Op.getOperand(1),
Carry);
9549 Op.getOperand(1),
Carry);
9570 EVT ArgVT =
Arg.getValueType();
9586 const uint64_t ByteSize =
DL.getTypeAllocSize(
RetTy);
9593 Entry.Ty =
RetTy->getPointerTo();
9594 Entry.IsSExt =
false;
9595 Entry.IsZExt =
false;
9596 Entry.IsSRet =
true;
9597 Args.push_back(Entry);
9604 Entry.IsSExt =
false;
9605 Entry.IsZExt =
false;
9606 Args.push_back(Entry);
9641 EVT VT =
Op.getValueType();
9643 "unexpected type for custom lowering DIV");
9649 const char *
Name =
nullptr;
9659 for (
auto AI : {1, 0}) {
9661 Arg.Node =
Op.getOperand(AI);
9666 CallLoweringInfo CLI(DAG);
9670 ES, std::move(Args));
9680ARMTargetLowering::BuildSDIVPow2(
SDNode *
N,
const APInt &Divisor,
9688 const bool MinSize =
ST.hasMinSize();
9689 const bool HasDivide =
ST.isThumb() ?
ST.hasDivideInThumbMode()
9690 :
ST.hasDivideInARMMode();
9694 if (
N->getOperand(0).getValueType().isVector())
9712 if (Divisor.
sgt(128))
9721 "unexpected type for custom lowering DIV");
9727 return LowerWindowsDIVLibCall(Op, DAG,
Signed,
DBZCHK);
9743void ARMTargetLowering::ExpandDIV_Windows(
9750 "unexpected type for custom lowering DIV");
9769 "Expected a predicate type!");
9772 "Expected a non-extending load");
9773 assert(LD->isUnindexed() &&
"Expected a unindexed load");
9789 LD->getMemOperand());
9806 assert(
LD->isUnindexed() &&
"Loads should be unindexed at this point.");
9813 {LD->getChain(), LD->getBasePtr()},
MemVT,
LD->getMemOperand());
9825 "Expected a predicate type!");
9827 assert(!ST->isTruncatingStore() &&
"Expected a non-extending store");
9828 assert(ST->isUnindexed() &&
"Expected a unindexed store");
9833 SDValue Build = ST->getValue();
9836 for (
unsigned I = 0;
I <
MemVT.getVectorNumElements();
I++) {
9838 ?
MemVT.getVectorNumElements() -
I - 1
9843 for (
unsigned I =
MemVT.getVectorNumElements();
I < 16;
I++)
9853 ST->getChain(), dl,
GRP, ST->getBasePtr(),
9855 ST->getMemOperand());
9862 assert(ST->isUnindexed() &&
"Stores should be unindexed at this point.");
9879 {ST->getChain(), Lo, Hi, ST->getBasePtr()},
9880 MemVT, ST->getMemOperand());
9898 MVT VT = Op.getSimpleValueType();
9911 VT, dl,
N->getChain(),
N->getBasePtr(),
N->getOffset(), Mask,
ZeroVec,
9912 N->getMemoryVT(),
N->getMemOperand(),
N->getAddressingMode(),
9913 N->getExtensionType(),
N->isExpandingLoad());
9925 if (!ST->hasMVEIntegerOps())
9929 unsigned BaseOpcode = 0;
9930 switch (Op->getOpcode()) {
9942 SDValue Op0 = Op->getOperand(0);
9950 "Only expected a power 2 vector size");
9957 Op0 = DAG.
getNode(BaseOpcode, dl, VT, Op0,
Rev);
9984 if (
EltVT != Op->getValueType(0))
9991 if (!ST->hasMVEFloatOps())
10013 SDValue Ops[] = {
N->getOperand(0),
10030 SDLoc dl(V.getNode());
10051 "AtomicCmpSwap on types less than 64 should be legal");
10052 SDValue Ops[] = {
N->getOperand(1),
10057 ARM::CMP_SWAP_64,
SDLoc(
N),
10077 EVT VT =
Op.getValueType();
10086 if (isUnsupportedFloatingType(LHS.getValueType())) {
10088 DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS, Chain,
IsSignaling);
10089 if (!RHS.getNode()) {
10090 RHS = DAG.
getConstant(0, dl, LHS.getValueType());
10122 switch (Op.getOpcode()) {
10132 case ISD::BR_CC:
return LowerBR_CC(Op, DAG);
10133 case ISD::BR_JT:
return LowerBR_JT(Op, DAG);
10152 case ISD::BITCAST:
return ExpandBITCAST(Op.getNode(), DAG, Subtarget);
10156 case ISD::SREM:
return LowerREM(Op.getNode(), DAG);
10157 case ISD::UREM:
return LowerREM(Op.getNode(), DAG);
10181 return LowerDIV_Windows(Op, DAG,
true);
10185 return LowerDIV_Windows(Op, DAG,
false);
10191 return LowerSignedALUO(Op, DAG);
10194 return LowerUnsignedALUO(Op, DAG);
10223 return LowerDYNAMIC_STACKALLOC(Op, DAG);
10239 if (
IntNo == Intrinsic::arm_smlald)
10241 else if (
IntNo == Intrinsic::arm_smlaldx)
10243 else if (
IntNo == Intrinsic::arm_smlsld)
10245 else if (
IntNo == Intrinsic::arm_smlsldx)
10260 N->getOperand(1),
N->getOperand(2),
10272 switch (
N->getOpcode()) {
10279 Res = ExpandBITCAST(
N, DAG, Subtarget);
10288 Res = LowerREM(
N, DAG);
10292 Res = LowerDivRem(
SDValue(
N, 0), DAG);
10345 "ROPI/RWPI not currently supported with SjLj");
10355 bool isThumb2 = Subtarget->
isThumb2();
10364 : &ARM::GPRRegClass;
10478 : &ARM::GPRnopcRegClass;
10486 if (!BB->isEHPad())
continue;
10491 II = BB->begin(),
IE = BB->end(); II !=
IE; ++II) {
10492 if (!II->isEHLabel())
continue;
10494 MCSymbol *Sym = II->getOperand(0).getMCSymbol();
10509 std::vector<MachineBasicBlock*>
LPadList;
10517 InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end());
10522 "No landing pad destinations for the dispatch jump table!");
10595 if ((
NumLPads & 0xFFFF0000) != 0) {
10631 }
else if (Subtarget->
isThumb()) {
10732 if ((
NumLPads & 0xFFFF0000) != 0) {
10806 for (std::vector<MachineBasicBlock*>::iterator
10809 if (
SeenMBBs.insert(CurMBB).second)
10821 while (!Successors.empty()) {
10823 if (
SMBB->isEHPad()) {
10824 BB->removeSuccessor(
SMBB);
10830 BB->normalizeSuccProbs();
10837 II = BB->rbegin(), IE = BB->rend(); II !=
IE; ++II) {
10838 if (!II->isCall())
continue;
10842 OI = II->operands_begin(),
OE = II->operands_end();
10844 if (!
OI->isReg())
continue;
10845 DefRegs[
OI->getReg()] =
true;
10850 for (
unsigned i = 0; SavedRegs[
i] != 0; ++
i) {
10851 unsigned Reg = SavedRegs[
i];
10853 !ARM::tGPRRegClass.contains(Reg) &&
10854 !ARM::hGPRRegClass.contains(Reg))
10856 if (Subtarget->
isThumb1Only() && !ARM::tGPRRegClass.contains(Reg))
10858 if (!Subtarget->
isThumb() && !ARM::GPRRegClass.contains(Reg))
10872 (*I)->setIsEHPad(
false);
10875 MI.eraseFromParent();
10891 return LdSize == 16 ? ARM::VLD1q32wb_fixed
10892 :
LdSize == 8 ? ARM::VLD1d32wb_fixed : 0;
10894 return LdSize == 4 ? ARM::tLDRi
10895 :
LdSize == 2 ? ARM::tLDRHi
10896 :
LdSize == 1 ? ARM::tLDRBi : 0;
10898 return LdSize == 4 ? ARM::t2LDR_POST
10899 :
LdSize == 2 ? ARM::t2LDRH_POST
10900 :
LdSize == 1 ? ARM::t2LDRB_POST : 0;
10901 return LdSize == 4 ? ARM::LDR_POST_IMM
10902 :
LdSize == 2 ? ARM::LDRH_POST
10903 :
LdSize == 1 ? ARM::LDRB_POST_IMM : 0;
10910 return StSize == 16 ? ARM::VST1q32wb_fixed
10911 :
StSize == 8 ? ARM::VST1d32wb_fixed : 0;
10913 return StSize == 4 ? ARM::tSTRi
10914 :
StSize == 2 ? ARM::tSTRHi
10915 :
StSize == 1 ? ARM::tSTRBi : 0;
10917 return StSize == 4 ? ARM::t2STR_POST
10918 :
StSize == 2 ? ARM::t2STRH_POST
10919 :
StSize == 1 ? ARM::t2STRB_POST : 0;
10920 return StSize == 4 ? ARM::STR_POST_IMM
10921 :
StSize == 2 ? ARM::STRH_POST
10922 :
StSize == 1 ? ARM::STRB_POST_IMM : 0;
10932 assert(
LdOpc != 0 &&
"Should have a load opcode");
10973 assert(
StOpc != 0 &&
"Should have a store opcode");
11020 unsigned SizeVal =
MI.getOperand(2).getImm();
11021 unsigned Alignment =
MI.getOperand(3).getImm();
11026 unsigned UnitSize = 0;
11032 bool IsThumb = Subtarget->
isThumb();
11034 if (Alignment & 1) {
11036 }
else if (Alignment & 2) {
11042 if ((Alignment % 16 == 0) &&
SizeVal >= 16)
11044 else if ((Alignment % 8 == 0) &&
SizeVal >= 8)
11053 bool IsNeon = UnitSize >= 8;
11054 TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
11056 VecTRC = UnitSize == 16 ? &ARM::DPairRegClass
11057 : UnitSize == 8 ? &ARM::DPRRegClass
11067 unsigned srcIn = src;
11069 for (
unsigned i = 0;
i < LoopSize;
i+=UnitSize) {
11095 MI.eraseFromParent();
11127 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11133 if ((LoopSize & 0xFFFF0000) != 0)
11136 .addImm(LoopSize & 0xFFFF)
11139 if ((LoopSize & 0xFFFF0000) != 0)
11142 .addImm(LoopSize >> 16)
11249 MI.eraseFromParent();
11261 "__chkstk is only supported on Windows");
11262 assert(Subtarget->
isThumb2() &&
"Windows on ARM requires Thumb-2 mode");
11282 switch (
TM.getCodeModel()) {
11300 Register Reg =
MRI.createVirtualRegister(&ARM::rGPRRegClass);
11324 MI.eraseFromParent();
11339 ContBB->transferSuccessorsAndUpdatePHIs(
MBB);
11348 .
addReg(
MI.getOperand(0).getReg())
11356 MI.eraseFromParent();
11448 SrcPhiReg =
MRI.createVirtualRegister(&ARM::rGPRRegClass);
11449 CurrSrcReg =
MRI.createVirtualRegister(&ARM::rGPRRegClass);
11469 MRI.createVirtualRegister(&ARM::GPRlrRegClass);
11540 bool isThumb2 = Subtarget->
isThumb2();
11541 switch (
MI.getOpcode()) {
11548 case ARM::tLDR_postidx: {
11552 .
add(
MI.getOperand(2))
11553 .
add(
MI.getOperand(3))
11554 .
add(
MI.getOperand(4))
11555 .
add(
MI.getOperand(0))
11557 MI.eraseFromParent();
11561 case ARM::MVE_MEMCPYLOOPINST:
11562 case ARM::MVE_MEMSETLOOPINST: {
11616 "block containing memcpy/memset Pseudo");
11629 bool IsMemcpy =
MI.getOpcode() == ARM::MVE_MEMCPYLOOPINST;
11646 MI.eraseFromParent();
11656 case ARM::t2STR_preidx:
11657 MI.setDesc(
TII->get(ARM::t2STR_PRE));
11659 case ARM::t2STRB_preidx:
11660 MI.setDesc(
TII->get(ARM::t2STRB_PRE));
11662 case ARM::t2STRH_preidx:
11663 MI.setDesc(
TII->get(ARM::t2STRH_PRE));
11666 case ARM::STRi_preidx:
11667 case ARM::STRBi_preidx: {
11668 unsigned NewOpc =
MI.getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM
11669 : ARM::STRB_PRE_IMM;
11671 unsigned Offset =
MI.getOperand(4).getImm();
11679 .
add(
MI.getOperand(0))
11680 .
add(
MI.getOperand(1))
11681 .
add(
MI.getOperand(2))
11683 .
add(
MI.getOperand(5))
11684 .
add(
MI.getOperand(6))
11686 MI.eraseFromParent();
11689 case ARM::STRr_preidx:
11690 case ARM::STRBr_preidx:
11691 case ARM::STRH_preidx: {
11693 switch (
MI.getOpcode()) {
11695 case ARM::STRr_preidx:
NewOpc = ARM::STR_PRE_REG;
break;
11696 case ARM::STRBr_preidx:
NewOpc = ARM::STRB_PRE_REG;
break;
11697 case ARM::STRH_preidx:
NewOpc = ARM::STRH_PRE;
break;
11700 for (
unsigned i = 0;
i <
MI.getNumOperands(); ++
i)
11701 MIB.
add(
MI.getOperand(
i));
11702 MI.eraseFromParent();
11706 case ARM::tMOVCCr_pseudo: {
11729 if (!
MI.killsRegister(ARM::CPSR) &&
11732 sinkMBB->addLiveIn(ARM::CPSR);
11738 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11745 .
addImm(
MI.getOperand(3).getImm())
11746 .
addReg(
MI.getOperand(4).getReg());
11761 .
addReg(
MI.getOperand(1).getReg())
11763 .
addReg(
MI.getOperand(2).getReg())
11766 MI.eraseFromParent();
11771 case ARM::BCCZi64: {
11782 BuildMI(BB, dl,
TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
11786 BuildMI(BB, dl,
TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
11792 BuildMI(BB, dl,
TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
11796 BuildMI(BB, dl,
TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
11806 BuildMI(BB, dl,
TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
11815 MI.eraseFromParent();
11819 case ARM::Int_eh_sjlj_setjmp:
11820 case ARM::Int_eh_sjlj_setjmp_nofp:
11821 case ARM::tInt_eh_sjlj_setjmp:
11822 case ARM::t2Int_eh_sjlj_setjmp:
11823 case ARM::t2Int_eh_sjlj_setjmp_nofp:
11826 case ARM::Int_eh_sjlj_setup_dispatch:
11827 EmitSjLjDispatchBlock(
MI, BB);
11855 bool isThumb2 = Subtarget->
isThumb2();
11860 isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass);
11865 SinkBB->transferSuccessorsAndUpdatePHIs(BB);
11874 BuildMI(BB, dl,
TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
11902 MI.eraseFromParent();
11907 case ARM::COPY_STRUCT_BYVAL_I32:
11909 return EmitStructByval(
MI, BB);
11910 case ARM::WIN__CHKSTK:
11911 return EmitLowered__chkstk(
MI, BB);
11912 case ARM::WIN__DBZCHK:
11913 return EmitLowered__dbzchk(
MI, BB);
11930 if (!
Node->hasAnyUseOfValue(0)) {
11931 MI.getOperand(0).setIsDead(
true);
11933 if (!
Node->hasAnyUseOfValue(1)) {
11934 MI.getOperand(1).setIsDead(
true);
11938 for (
unsigned I = 0;
I !=
MI.getOperand(4).getImm(); ++
I) {
11939 Register TmpReg =
MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass
11940 : &ARM::GPRRegClass);
11947 if (
MI.getOpcode() == ARM::MEMCPY) {
11968 MI.getDesc().getNumOperands() + 5 -
MI.getDesc().getSize()
11969 &&
"converted opcode should be the same except for cc_out"
11970 " (and, on Thumb1, pred)");
11980 MI.addOperand(
MI.getOperand(1));
11981 MI.RemoveOperand(1);
11985 for (
unsigned i =
MI.getNumOperands();
i--;) {
11987 if (
op.isReg() &&
op.isUse()) {
11990 MI.tieOperands(DefIdx,
i);
12005 assert(!
NewOpc &&
"Optional cc_out operand required");
12019 MI.RemoveOperand(
i);
12024 assert(!
NewOpc &&
"Optional cc_out operand required");
12027 assert(
deadCPSR == !Node->hasAnyUseOfValue(1) &&
"inconsistent dead flag");
12030 "expect uninitialized optional cc_out operand");
12068 switch (
N->getOpcode()) {
12069 default:
return false;
12093 EVT VT =
N->getValueType(0);
12140 bool AllOnes =
false) {
12142 EVT VT =
N->getValueType(0);
12159 CCOp, TrueVal, FalseVal);
12171 if (
N1.getNode()->hasOneUse())
12198 if (!
N->getValueType(0).is64BitVector())
12206 EVT VT =
N->getValueType(0);
12209 Ops.push_back(DAG.
getConstant(Intrinsic::arm_neon_vpadd, dl,
12211 Ops.push_back(
Unzip->getOperand(0));
12212 Ops.push_back(
Unzip->getOperand(1));
12237 if (!
N00.getValueType().is64BitVector() ||
12245 EVT VT =
N->getValueType(0);
12251 Opcode = Intrinsic::arm_neon_vpaddls;
12253 Opcode = Intrinsic::arm_neon_vpaddlu;
12256 EVT ElemTy =
N00.getValueType().getVectorElementType();
12260 N00.getOperand(0),
N00.getOperand(1));
12275 if (
DCI.isBeforeLegalize() || !Subtarget->
hasNEON()
12281 EVT VT =
N->getValueType(0);
12309 if (V !=
ExtVec0->getOperand(0).getNode() ||
12310 V !=
ExtVec1->getOperand(0).getNode())
12343 Ops.push_back(DAG.
getConstant(Intrinsic::arm_neon_vpaddls, dl,
12347 Ops.push_back(Vec);
12395 if (SRA.getOpcode() !=
ISD::SRA) {
12402 if (Const->getZExtValue() != 31)
12407 if (SRA.getOperand(0) !=
Mul)
12412 unsigned Opcode = 0;
12476 "Expect an ADDE or SUBE");
12480 "ADDE node has the wrong inputs");
12499 "Expect ADDC with two result values. First: i32");
12529 unsigned Opc =
MULOp->getOpcode();
12572 Ops.push_back(
LoMul->getOperand(0));
12573 Ops.push_back(
LoMul->getOperand(1));
12688 {N->getOperand(0), N->getOperand(1),
12689 AddcNode->getOperand(0), AddcNode->getOperand(1)});
12713 int32_t
imm =
C->getSExtValue();
12719 return DAG.
getNode(Opcode,
DL,
N->getVTList(),
N->getOperand(0), RHS);
12734 int64_t
imm =
C->getSExtValue();
12745 return DAG.
getNode(Opcode,
DL,
N->getVTList(),
12746 N->getOperand(0), RHS,
N->getOperand(2));
12771 SetCC =
N->getOperand(0);
12772 LHS =
SetCC->getOperand(0);
12773 RHS =
SetCC->getOperand(1);
12775 TrueVal =
N->getOperand(1);
12776 FalseVal =
N->getOperand(2);
12778 LHS =
N->getOperand(0);
12779 RHS =
N->getOperand(1);
12781 TrueVal =
N->getOperand(2);
12782 FalseVal =
N->getOperand(3);
12787 unsigned int Opcode = 0;
12816 switch (TrueVal->getOpcode()) {
12835 if (TrueVal != LHS || FalseVal != RHS)
12851 DCI.DAG.getNode(Opcode, dl,
MVT::i32, LHS, RHS->getOperand(0));
12867 EVT VT =
N->getValueType(0);
12872 Shft =
N->getOperand(0);
12879 Cmp.getOperand(0) !=
N->getOperand(1) ||
12880 Cmp.getOperand(1) !=
N->getOperand(2))
12882 Shft =
N->getOperand(1);
12892 switch (
Clamp->getSExtValue()) {
12897 case (1 << 15) - 1:
12901 case (1ULL << 31) - 1:
12924 EVT VecVT =
Ext0.getOperand(0).getValueType();
12927 if (
Ext1.getOperand(0).getValueType() != VecVT ||
12965 Parts.push_back(VQDMULH);
12989 if (
N->getOperand(0).getOpcode() !=
ISD::XOR)
12999 if (!Const || !Const->isOne())
13037 if (
DCI.isBeforeLegalize())
return SDValue();
13089 VecRed.getResNo() != 0 ||
13107 for (
unsigned I = S,
E =
VecRed.getNumOperands();
I <
E;
I++)
13108 Ops.push_back(
VecRed->getOperand(
I));
13170 if (Const->getAPIntValue().ult(256))
13172 if (
N1->getOpcode() ==
ISD::ADD && Const->getAPIntValue().slt(0) &&
13173 Const->getAPIntValue().sgt(-256))
13210 if (
DCI.isBeforeLegalize())
13229 if (ST->isThumb() && ST->isThumb1Only())
13233 for (
auto U :
N->uses()) {
13234 switch(U->getOpcode()) {
13252 if (U->getOperand(0).getOpcode() ==
ISD::SHL ||
13253 U->getOperand(1).getOpcode() ==
ISD::SHL)
13263 if (
N->getOperand(0).getOpcode() !=
ISD::SHL)
13278 C2Int.getBitWidth() -
C2->getZExtValue());
13287 unsigned Zeros = Imm.countLeadingZeros() + Imm.countTrailingZeros();
13288 return Imm.getBitWidth() -
Zeros > 8;
13303 SHL.dump();
N->dump());
13338 if (CSINC.getOpcode() !=
ARMISD::CSINC || !CSINC.hasOneUse())
13347 CSINC.getOperand(0)),
13348 CSINC.getOperand(1), CSINC.getOperand(2),
13349 CSINC.getOperand(3));
13361 if (
N1.getNode()->hasOneUse())
13388 VDup->getOperand(0));
13419 Opcode =
N1.getOpcode();
13429 EVT VT =
N->getValueType(0);
13440 EVT VT =
N->getValueType(0);
13452 return Op->getOperand(0);
13466 And =
And->getOperand(0);
13471 Mask = Mask->getOperand(0);
13508 EVT VT =
N->getValueType(0);
13515 if (
DCI.isBeforeLegalize() ||
DCI.isCalledByLegalizer())
13527 int64_t
MulAmt =
C->getSExtValue();
13530 ShiftAmt = ShiftAmt & (32 - 1);
13585 DCI.CombineTo(
N, Res,
false);
13593 if (
DCI.isBeforeLegalize() ||
DCI.isCalledByLegalizer())
13605 if (
C1 == 255 ||
C1 == 65535)
13622 if (!
C2 ||
C2 >= 32)
13699 EVT VT =
N->getValueType(0);
13706 APInt SplatBits, SplatUndef;
13707 unsigned SplatBitSize;
13710 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
HasAnyUndefs)) {
13711 if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 ||
13712 SplatBitSize == 64) {
13751 SDValue SRL = OR->getOperand(0);
13752 SDValue SHL = OR->getOperand(1);
13755 SRL = OR->getOperand(1);
13756 SHL = OR->getOperand(0);
13763 if ((SRL.getOperand(0).getNode() != SHL.getOperand(0).getNode()) ||
13788 unsigned Opcode = 0;
13810 EVT VT =
N->getValueType(0);
13837 unsigned Mask =
MaskC->getZExtValue();
13838 if (Mask == 0xffff)
13844 unsigned Val =
N1C->getZExtValue();
13845 if ((Val & ~Mask) != Val)
13855 DCI.CombineTo(
N, Res,
false);
13870 (Mask == ~
Mask2)) {
13873 if (Subtarget->
hasDSP() &&
13874 (Mask == 0xffff || Mask == 0xffff0000))
13882 DCI.CombineTo(
N, Res,
false);
13887 (~Mask ==
Mask2)) {
13890 if (Subtarget->
hasDSP() &&
13899 DCI.CombineTo(
N, Res,
false);
13920 DCI.CombineTo(
N, Res,
false);
13957 return isValidMVECond(CC,
N->getOperand(0).getValueType().isFloatingPoint());
13964 EVT VT =
N->getValueType(0);
13992 EVT VT =
N->getValueType(0);
14002 APInt SplatBits, SplatUndef;
14003 unsigned SplatBitSize;
14006 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
HasAnyUndefs)) {
14007 if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 ||
14008 SplatBitSize == 64) {
14044 unsigned SplatBitSize;
14066 N1->getOperand(0));
14089 EVT VT =
N->getValueType(0);
14205 static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
14206 "undefined behavior");
14207 unsigned Mask = (1u << Width) - 1;
14209 if ((Mask & (~
Mask2)) == 0)
14211 N->getOperand(0),
N1.getOperand(0),
N->getOperand(2));
14230 EVT VT =
N->getValueType(0);
14253 EVT VT =
N->getValueType(0);
14285 SDValue BasePtr = LD->getBasePtr();
14288 LD->getAlignment(), LD->getMemOperand()->getFlags());
14294 LD->getPointerInfo().getWithOffset(4),
14295 std::min(4U, LD->getAlignment()),
14296 LD->getMemOperand()->getFlags());
14299 if (
DCI.DAG.getDataLayout().isBigEndian())
14318 BV =
BV.getOperand(0);
14325 unsigned Offset =
InDouble.getConstantOperandVal(1) == 1 ? 2 : 0;
14332 return DCI.DAG.getMergeValues({Op0, Op1},
SDLoc(
N));
14340 if (
BV.getConstantOperandVal(2) ==
Offset)
14341 Op0 =
BV.getOperand(1);
14342 if (
BV.getConstantOperandVal(2) ==
Offset + 1)
14343 Op1 =
BV.getOperand(1);
14345 BV =
BV.getOperand(0);
14350 return DCI.DAG.getMergeValues({Op0, Op1},
SDLoc(
N));
14390 if (Copy.getValueType() ==
MVT::f32 &&
14392 SDValue Ops[] = {Copy->getOperand(0), Copy->getOperand(1)};
14401 if (
LN0->hasOneUse() &&
LN0->isUnindexed() &&
14404 DCI.DAG.getLoad(
N->getValueType(0),
SDLoc(
N),
LN0->getChain(),
14405 LN0->getBasePtr(),
LN0->getMemOperand());
14406 DCI.DAG.ReplaceAllUsesOfValueWith(
SDValue(
N, 0), Load.getValue(0));
14407 DCI.DAG.ReplaceAllUsesOfValueWith(Op0.
getValue(1), Load.getValue(1));
14423 EVT VT =
N->getValueType(0);
14457 unsigned NumElts =
N->getValueType(0).getVectorNumElements();
14476 if (
N->getNumOperands() == 2)
14482 EVT VT =
N->getValueType(0);
14492 DCI.AddToWorklist(V.getNode());
14514 EVT VT =
N->getValueType(0);
14518 if (
EltVT.getSizeInBits() != 32 || !
N->hasOneUse())
14527 Use->getValueType(0).isFloatingPoint())
14542 if (
Elt->getOperand(0).getValueType() ==
MVT::i32)
14574 V->getOperand(0).getValueType() ==
MVT::i32)
14580 DCI.AddToWorklist(V.getNode());
14593 EVT VT =
N->getValueType(0);
14600 if (Op->getOperand(0).getValueType() == VT)
14601 return Op->getOperand(0);
14616 if (Op.getValueType() ==
MVT::i32) {
14627 EVT VT =
N->getValueType(0);
14632 if (ST->isLittle())
14642 if (Op->getOperand(0).getValueType() == VT)
14643 return Op->getOperand(0);
14655 EVT VT =
N->getValueType(0);
14687 EVT VT =
N->getValueType(0);
14701 DCI.AddToWorklist(V.getNode());
14713 EVT VT =
N->getValueType(0);
14716 if (!
DCI.isAfterLegalizeDAG() || VT !=
MVT::i32 ||
14717 !
DCI.DAG.getTargetLoweringInfo().isTypeLegal(
MVT::f64))
14722 Ext.getOperand(0).getValueType() ==
MVT::f32)
14723 Ext = Ext.getOperand(0);
14726 Ext.getConstantOperandVal(1) % 2 != 0)
14728 if (Ext->use_size() == 1 &&
14733 SDValue Op0 = Ext.getOperand(0);
14735 unsigned Lane = Ext.getConstantOperandVal(1);
14741 return V->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
14742 isa<ConstantSDNode>(V->getOperand(1)) &&
14743 V->getConstantOperandVal(1) == Lane + 1;
14763 DCI.DAG.getConstant(Ext.getConstantOperandVal(1) / 2, dl,
MVT::i32));
14775 EVT VT =
N->getValueType(0);
14788 while (
X.getValueType() != VT &&
X->getOpcode() ==
ISD::BITCAST)
14789 X =
X->getOperand(0);
14790 if (
X.getValueType() == VT)
14798 return Op0.
getOperand(
N->getConstantOperandVal(1));
14808 unsigned Offset =
N->getConstantOperandVal(1);
14820 unsigned Idx =
N->getConstantOperandVal(1);
14834 EVT VT =
N->getValueType(0);
14839 Op.getOperand(0).getValueType().getScalarType())
14850 EVT VT =
N->getValueType(0);
14851 if (!
N->getOperand(1).isUndef() ||
N->getOperand(0).getValueType() != VT)
14857 switch (Op.getOpcode()) {
14878 for (
int i = 0, e =
NMask.size();
i != e;
i++) {
14883 return DAG.
getNode(Op.getOpcode(),
SDLoc(Op), Op.getValueType(),
14942 EVT VT =
N->getValueType(0);
14986 UE =
Addr.getNode()->use_end(); UI != UE; ++UI) {
14989 UI.getUse().getResNo() !=
Addr.getResNo())
14997 Visited.insert(
Addr.getNode());
14998 Worklist.push_back(
N);
14999 Worklist.push_back(
User);
15065 switch (
N->getOpcode()) {
15081 VecTy =
N->getValueType(0);
15082 }
else if (isIntrinsic) {
15083 VecTy =
N->getOperand(
AddrOpIdx+1).getValueType();
15085 assert(
isStore &&
"Node has to be a load, a store, or an intrinsic!");
15086 VecTy =
N->getOperand(1).getValueType();
15100 if (NumBytes >= 3 * 16 && (!
CInc ||
CInc->getZExtValue() != NumBytes)) {
15110 unsigned Alignment =
MemN->getAlignment();
15127 if (Alignment == 0)
15131 assert(
NumVecs == 1 &&
"Unexpected multi-element generic load/store.");
15159 Ops.push_back(
N->getOperand(0));
15161 Ops.push_back(Inc);
15165 Ops.push_back(
StN->getValue());
15172 Ops.push_back(
N->getOperand(
i));
15187 MemN->getMemOperand());
15212 if (
DCI.isBeforeLegalize() ||
DCI.isCalledByLegalizer())
15220 if (
DCI.isBeforeLegalize() ||
DCI.isCalledByLegalizer())
15231 if (
IntNo == Intrinsic::arm_mve_vst2q &&
15234 if (
IntNo == Intrinsic::arm_mve_vst4q &&
15240 UE =
Addr.getNode()->use_end();
15244 UI.getUse().getResNo() !=
Addr.getResNo())
15252 Visited.insert(
Addr.getNode());
15253 Worklist.push_back(
N);
15254 Worklist.push_back(
User);
15266 case Intrinsic::arm_mve_vld2q:
15270 case Intrinsic::arm_mve_vld4q:
15274 case Intrinsic::arm_mve_vst2q:
15279 case Intrinsic::arm_mve_vst4q:
15289 VecTy =
N->getValueType(0);
15291 VecTy =
N->getOperand(3).getValueType();
15299 if (!
CInc ||
CInc->getZExtValue() != NumBytes)
15315 Ops.push_back(
N->getOperand(0));
15316 Ops.push_back(
N->getOperand(2));
15317 Ops.push_back(Inc);
15319 for (
unsigned i = 3;
i <
N->getNumOperands(); ++
i)
15320 Ops.push_back(
N->getOperand(
i));
15323 MemN->getMemOperand());
15346 EVT VT =
N->getValueType(0);
15358 if (
IntNo == Intrinsic::arm_neon_vld2lane) {
15361 }
else if (
IntNo == Intrinsic::arm_neon_vld3lane) {
15364 }
else if (
IntNo == Intrinsic::arm_neon_vld4lane) {
15378 if (UI.getUse().getResNo() ==
NumVecs)
15393 SDValue Ops[] = {
VLD->getOperand(0),
VLD->getOperand(2) };
15402 unsigned ResNo = UI.getUse().
getResNo();
15427 EVT VT =
N->getValueType(0);
15433 if (!
DCI.DAG.getTargetLoweringInfo().isTypeLegal(
ExtractVT))
15436 N->getOperand(0),
N->getOperand(1));
15448 Op = Op.getOperand(0);
15453 unsigned EltSize = Op.getScalarValueSizeInBits();
15474 if (Op.getValueType() ==
MVT::f32)
15477 else if (Op.getValueType() ==
MVT::f16)
15489 if (LD && Op.hasOneUse() && LD->isUnindexed() &&
15490 LD->getMemoryVT() ==
N->getValueType(0).getVectorElementType()) {
15491 SDValue Ops[] = {LD->getOperand(0), LD->getOperand(1),
15496 LD->getMemoryVT(), LD->getMemOperand());
15506 EVT VT =
N->getValueType(0);
15510 DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
15523 if (!
St->isTruncatingStore() || !VT.
isVector())
15528 assert(
StVT != VT &&
"Cannot truncate to the same type");
15588 for (
unsigned I = 0;
I <
E;
I++) {
15593 St->getAlignment(),
St->getMemOperand()->getFlags());
15596 Chains.push_back(
Ch);
15606 if (!
St->isSimple() ||
St->isTruncatingStore() || !
St->isUnindexed())
15613 if (!
ToVT.isVector())
15622 unsigned NumElements = 4;
15623 if (
FromVT.getVectorNumElements() % NumElements != 0)
15636 if (
SVN->getOperand(1).isUndef())
15643 if (M[
I] >= 0 && M[
I] != (
int)(
Off0 +
I / 2))
15645 if (M[
I + 1] >= 0 && M[
I + 1] != (
int)(
Off1 +
I / 2))
15661 Align Alignment =
St->getOriginalAlign();
15672 for (
unsigned i = 0;
i <
FromVT.getVectorNumElements() / NumElements;
i++) {
15689 Stores.push_back(Store);
15699 if (!
St->isSimple() ||
St->isTruncatingStore() || !
St->isUnindexed())
15712 Align Alignment =
St->getOriginalAlign();
15717 FromVT.getVectorNumElements());
15722 i *
FromVT.getVectorNumElements() *
ToVT.getScalarSizeInBits() / 8;
15730 Stores.push_back(Store);
15740 if (!
St->isSimple() ||
St->isTruncatingStore() || !
St->isUnindexed())
15751 {Extract.getOperand(0), Extract.getOperand(1)});
15760 Align Alignment =
St->getOriginalAlign();
15766 Alignment.
value(), MMOFlags, AAInfo);
15777 if (
St->isVolatile())
15802 StVal.getNode()->hasOneUse()) {
15809 BasePtr,
St->getPointerInfo(),
St->getOriginalAlign(),
15810 St->getMemOperand()->getFlags());
15816 OffsetPtr,
St->getPointerInfo().getWithOffset(4),
15817 St->getOriginalAlign(),
15818 St->getMemOperand()->getFlags());
15830 IntVec.getValueType().getVectorNumElements());
15833 Vec,
StVal.getOperand(1));
15839 DCI.AddToWorklist(V.getNode());
15840 return DAG.
getStore(
St->getChain(), dl, V,
St->getBasePtr(),
15841 St->getPointerInfo(),
St->getAlignment(),
15842 St->getMemOperand()->getFlags(),
St->getAAInfo());
15847 DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
15868 if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
15876 MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
15879 uint32_t IntBits = IntTy.getSizeInBits();
15880 unsigned NumLanes = Op.getValueType().getVectorNumElements();
15892 if (
C == -1 ||
C == 0 ||
C > 32)
15897 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
15898 Intrinsic::arm_neon_vcvtfp2fxu;
15925 unsigned OpOpcode = Op.getNode()->getOpcode();
15926 if (!
N->getValueType(0).isVector() || !
N->getValueType(0).isSimple() ||
15936 MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
15937 uint32_t IntBits = IntTy.getSizeInBits();
15938 unsigned NumLanes = Op.getValueType().getVectorNumElements();
15950 if (
C == -1 ||
C == 0 ||
C > 32)
15961 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
15962 Intrinsic::arm_neon_vcvtfxu2fp;
15971 if (!ST->hasMVEIntegerOps())
15996 return AVT.getVectorNumElements() == Ty.getVectorNumElements() &&
16002 if (!
AVT.is128BitVector())
16005 128 /
AVT.getVectorMinNumElements())),
16045 Mul->getOperand(0).getScalarValueSizeInBits() * 2 >=
16046 ResVT.getScalarSizeInBits())
16054 A =
ExtA->getOperand(0);
16055 B =
ExtB->getOperand(0);
16077 Mul->getOperand(0).getScalarValueSizeInBits() * 2 >=
16078 ResVT.getScalarSizeInBits())
16086 A =
ExtA->getOperand(0);
16087 B =
ExtB->getOperand(0);
16101 EVT VT = Ops[0].getValueType();
16104 "Unexpected illegal long reduction opcode");
16119 Ext0.getValue(1),
Ext1.getValue(1));
16200 Op = Op->getOperand(1);
16202 Op->getOperand(0)->getOpcode() ==
ISD::MUL) {
16204 if (
Mul->getOperand(0) ==
Mul->getOperand(1) &&
16221 unsigned IsTop =
N->getConstantOperandVal(2);
16242 unsigned NumElts =
N->getValueType(0).getVectorNumElements();
16263 unsigned IsTop =
N->getConstantOperandVal(2);
16265 unsigned NumElts =
N->getValueType(0).getVectorNumElements();
16286 int ShiftAmt =
C->getSExtValue();
16287 if (ShiftAmt == 0) {
16293 if (ShiftAmt >= -32 && ShiftAmt < 0) {
16321 case Intrinsic::arm_neon_vshifts:
16322 case Intrinsic::arm_neon_vshiftu:
16323 case Intrinsic::arm_neon_vrshifts:
16324 case Intrinsic::arm_neon_vrshiftu:
16325 case Intrinsic::arm_neon_vrshiftn:
16326 case Intrinsic::arm_neon_vqshifts:
16327 case Intrinsic::arm_neon_vqshiftu:
16328 case Intrinsic::arm_neon_vqshiftsu:
16329 case Intrinsic::arm_neon_vqshiftns:
16330 case Intrinsic::arm_neon_vqshiftnu:
16331 case Intrinsic::arm_neon_vqshiftnsu:
16332 case Intrinsic::arm_neon_vqrshiftns:
16333 case Intrinsic::arm_neon_vqrshiftnu:
16334 case Intrinsic::arm_neon_vqrshiftnsu: {
16335 EVT VT =
N->getOperand(1).getValueType();
16340 case Intrinsic::arm_neon_vshifts:
16341 case Intrinsic::arm_neon_vshiftu:
16346 if (
isVShiftRImm(
N->getOperand(2), VT,
false,
true, Cnt)) {
16353 case Intrinsic::arm_neon_vrshifts:
16354 case Intrinsic::arm_neon_vrshiftu:
16359 case Intrinsic::arm_neon_vqshifts:
16360 case Intrinsic::arm_neon_vqshiftu:
16365 case Intrinsic::arm_neon_vqshiftsu:
16370 case Intrinsic::arm_neon_vrshiftn:
16371 case Intrinsic::arm_neon_vqshiftns:
16372 case Intrinsic::arm_neon_vqshiftnu:
16373 case Intrinsic::arm_neon_vqshiftnsu:
16374 case Intrinsic::arm_neon_vqrshiftns:
16375 case Intrinsic::arm_neon_vqrshiftnu:
16376 case Intrinsic::arm_neon_vqrshiftnsu:
16388 case Intrinsic::arm_neon_vshifts:
16389 case Intrinsic::arm_neon_vshiftu:
16392 case Intrinsic::arm_neon_vrshifts:
16395 case Intrinsic::arm_neon_vrshiftu:
16398 case Intrinsic::arm_neon_vrshiftn:
16401 case Intrinsic::arm_neon_vqshifts:
16404 case Intrinsic::arm_neon_vqshiftu:
16407 case Intrinsic::arm_neon_vqshiftsu:
16410 case Intrinsic::arm_neon_vqshiftns:
16413 case Intrinsic::arm_neon_vqshiftnu:
16416 case Intrinsic::arm_neon_vqshiftnsu:
16419 case Intrinsic::arm_neon_vqrshiftns:
16422 case Intrinsic::arm_neon_vqrshiftnu:
16425 case Intrinsic::arm_neon_vqrshiftnsu:
16435 case Intrinsic::arm_neon_vshiftins: {
16436 EVT VT =
N->getOperand(1).getValueType();
16442 else if (
isVShiftRImm(
N->getOperand(3), VT,
false,
true, Cnt))
16450 N->getOperand(1),
N->getOperand(2),
16454 case Intrinsic::arm_neon_vqrshifts:
16455 case Intrinsic::arm_neon_vqrshiftu:
16459 case Intrinsic::arm_mve_vqdmlah:
16460 case Intrinsic::arm_mve_vqdmlash:
16461 case Intrinsic::arm_mve_vqrdmlah:
16462 case Intrinsic::arm_mve_vqrdmlash:
16463 case Intrinsic::arm_mve_vmla_n_predicated:
16464 case Intrinsic::arm_mve_vmlas_n_predicated:
16465 case Intrinsic::arm_mve_vqdmlah_predicated:
16466 case Intrinsic::arm_mve_vqdmlash_predicated:
16467 case Intrinsic::arm_mve_vqrdmlah_predicated:
16468 case Intrinsic::arm_mve_vqrdmlash_predicated: {
16473 unsigned BitWidth =
N->getValueType(0).getScalarSizeInBits();
16480 case Intrinsic::arm_mve_minv:
16481 case Intrinsic::arm_mve_maxv:
16482 case Intrinsic::arm_mve_minav:
16483 case Intrinsic::arm_mve_maxav:
16484 case Intrinsic::arm_mve_minv_predicated:
16485 case Intrinsic::arm_mve_maxv_predicated:
16486 case Intrinsic::arm_mve_minav_predicated:
16487 case Intrinsic::arm_mve_maxav_predicated: {
16490 unsigned BitWidth =
N->getOperand(2)->getValueType(0).getScalarSizeInBits();
16497 case Intrinsic::arm_mve_addv: {
16505 case Intrinsic::arm_mve_addlv:
16506 case Intrinsic::arm_mve_addlv_predicated: {
16510 unsigned Opc =
IntNo == Intrinsic::arm_mve_addlv ?
16515 for (
unsigned i = 1, e =
N->getNumOperands();
i < e;
i++)
16517 Ops.push_back(
N->getOperand(
i));
16538 EVT VT =
N->getValueType(0);
16553 N->getOperand(0)->getOpcode() ==
ISD::AND &&
16554 N->getOperand(0)->hasOneUse()) {
16555 if (
DCI.isBeforeLegalize() ||
DCI.isCalledByLegalizer())
16571 if (AndMask == 255 || AndMask == 65535)
16590 if (ST->hasMVEIntegerOps() && VT ==
MVT::v2i64)
16595 switch (
N->getOpcode()) {
16608 if (
isVShiftRImm(
N->getOperand(1), VT,
false,
false, Cnt)) {
16628 if (!LD->isSimple() || !N0.
hasOneUse() || LD->isIndexed() ||
16633 if (!
ToVT.isVector())
16639 unsigned NumElements = 0;
16644 if (NumElements == 0 ||
16646 FromVT.getVectorNumElements() % NumElements != 0 ||
16654 SDValue BasePtr = LD->getBasePtr();
16655 Align Alignment = LD->getOriginalAlign();
16669 for (
unsigned i = 0;
i <
FromVT.getVectorNumElements() / NumElements;
i++) {
16677 Alignment, MMOFlags, AAInfo);
16686 for (
unsigned i = 0;
i < Loads.size();
i++) {
16712 if ((ST->hasNEON() || ST->hasMVEIntegerOps()) &&
16716 EVT VT =
N->getValueType(0);
16726 switch (
N->getOpcode()) {
16740 if (ST->hasMVEIntegerOps())
16749 if (ST->hasMVEFloatOps())
16760 EVT VT =
N->getValueType(0);
16762 if (!ST->hasMVEIntegerOps())
16864 const APInt *CV = &
C->getAPIntValue();
16882 SDValue Op0 = CMOV->getOperand(0);
16883 SDValue Op1 = CMOV->getOperand(1);
16885 auto CC =
CCNode->getAPIntValue().getLimitedValue();
16935 EVT VT =
X.getValueType();
16966 switch (
N->getOpcode()) {
16981 if (Const->isNullValue())
16983 else if (Const->isOne())
16992 if (IntOp != Intrinsic::test_start_loop_iterations &&
16993 IntOp != Intrinsic::loop_decrement_reg)
17019 bool Negate =
false;
17025 Cond =
N->getOperand(1);
17030 Cond =
N->getOperand(2);
17033 if (!Const->isOne() && !Const->isNullValue())
17035 Imm = Const->getZExtValue();
17064 "unsupported condition");
17071 &&
"expected single br user");
17082 if (IntOp == Intrinsic::test_start_loop_iterations) {
17135 EVT VT =
N->getValueType(0);
17137 SDValue LHS = Cmp.getOperand(0);
17138 SDValue RHS = Cmp.getOperand(1);
17149 LHS->getOperand(0)->hasOneUse()) {
17157 (
RHSC &&
RHSC->getZExtValue() == 0)) {
17175 EVT VT =
N->getValueType(0);
17177 SDValue LHS = Cmp.getOperand(0);
17178 SDValue RHS = Cmp.getOperand(1);
17179 SDValue FalseVal =
N->getOperand(0);
17180 SDValue TrueVal =
N->getOperand(1);
17210 if (CC ==
ARMCC::NE && FalseVal == RHS && FalseVal != LHS) {
17212 N->getOperand(3), Cmp);
17213 }
else if (CC ==
ARMCC::EQ && TrueVal == RHS) {
17226 if ((
LHS0C &&
LHS0C->getZExtValue() == 0) &&
17228 (
RHSC &&
RHSC->getZExtValue() == 0)) {
17230 LHS->getOperand(2), LHS->getOperand(3),
17231 LHS->getOperand(4));
17277 N->getOperand(3),
CPSRGlue.getValue(1));
17313 FalseVal.getOperand(0) == LHS && FalseVal.getOperand(1) == RHS) ||
17317 unsigned ShiftAmount =
TrueConst->logBase2();
17331 if (Known.
Zero == 0xfffffffe)
17334 else if (Known.
Zero == 0xffffff00)
17337 else if (Known.
Zero == 0xffff0000)
17355 if (
SrcVT.getScalarSizeInBits() ==
DstVT.getScalarSizeInBits())
17370 SrcVT.getScalarSizeInBits() <=
DstVT.getScalarSizeInBits() &&
17386 EVT VT =
N->getValueType(0);
17394 if (
N->getNumOperands() == 2 &&
17398 N->getOperand(0).getOperand(1),
17399 N->getOperand(1).getOperand(0),
17400 N->getOperand(1).getOperand(1));
17403 if (
N->getNumOperands() == 2 &&
17409 if (
S0->getOperand(0) == S1->getOperand(0) &&
17410 S0->getOperand(1) == S1->getOperand(1)) {
17413 Mask.append(S1->getMask().begin(), S1->getMask().end());
17433 return Op.getOpcode() == ISD::BUILD_VECTOR ||
17434 Op.getOpcode() == ISD::VECTOR_SHUFFLE ||
17435 (Op.getOpcode() == ISD::BITCAST &&
17436 Op.getOperand(0).getOpcode() == ISD::BUILD_VECTOR);
17439 for (
unsigned Op = 0; Op <
N->getNumOperands(); Op++) {
17441 for (
unsigned i = 0;
i < O.getValueType().getVectorNumElements();
i++) {
17454 if (!
DCI.isAfterLegalizeDAG())
17459 int NumIns =
N->getNumOperands();
17461 "Expected 2 or 4 inputs to an MVETrunc");
17463 if (
N->getNumOperands() == 4)
17469 ISD::ADD,
DL, StackPtr.getValueType(), StackPtr,
17475 Chains.push_back(
Ch);
17489 if (!LD || !LD->isSimple() || !N0.
hasOneUse() || LD->isIndexed())
17494 if (!
ToVT.isVector())
17500 unsigned NumElements = 0;
17505 assert(NumElements != 0);
17518 SDValue BasePtr = LD->getBasePtr();
17519 Align Alignment = LD->getOriginalAlign();
17531 for (
unsigned i = 0;
i <
FromVT.getVectorNumElements() / NumElements;
i++) {
17539 Alignment, MMOFlags, AAInfo);
17555 EVT VT =
N->getValueType(0);
17557 assert(
N->getNumValues() == 2 &&
"Expected MVEEXT with 2 elements");
17560 EVT ExtVT =
N->getOperand(0).getValueType().getHalfNumVectorElementsVT(
17580 assert(Mask.size() ==
SVN->getValueType(0).getVectorNumElements());
17611 if (
V0.getNode() !=
N ||
V1.getNode() !=
N)
17616 if (
N->getOperand(0)->getOpcode() ==
ISD::LOAD)
17620 if (!
DCI.isAfterLegalizeDAG())
17629 "Expected 2 or 4 outputs to an MVEEXT");
17630 EVT LoadVT =
N->getOperand(0).getValueType().getHalfNumVectorElementsVT(
17632 if (
N->getNumOperands() == 4)
17638 StackPtr, MPI,
Align(4));
17643 ISD::ADD,
DL, StackPtr.getValueType(), StackPtr,
17650 Loads.push_back(Load);
17658 switch (
N->getOpcode()) {
17748 unsigned BitWidth =
N->getValueType(0).getSizeInBits();
17755 unsigned BitWidth =
N->getValueType(0).getSizeInBits();
17766 unsigned BitWidth =
N->getValueType(0).getSizeInBits();
17774 unsigned LowWidth =
N->getOperand(0).getValueType().getSizeInBits();
17776 unsigned HighWidth =
N->getOperand(1).getValueType().getSizeInBits();
17784 unsigned HighWidth =
N->getOperand(0).getValueType().getSizeInBits();
17786 unsigned LowWidth =
N->getOperand(1).getValueType().getSizeInBits();
17794 unsigned BitWidth =
N->getValueType(0).getSizeInBits();
17805 unsigned BitWidth =
N->getValueType(0).getSizeInBits();
17815 case Intrinsic::arm_neon_vld1:
17816 case Intrinsic::arm_neon_vld1x2:
17817 case Intrinsic::arm_neon_vld1x3:
17818 case Intrinsic::arm_neon_vld1x4:
17819 case Intrinsic::arm_neon_vld2:
17820 case Intrinsic::arm_neon_vld3:
17821 case Intrinsic::arm_neon_vld4:
17822 case Intrinsic::arm_neon_vld2lane:
17823 case Intrinsic::arm_neon_vld3lane:
17824 case Intrinsic::arm_neon_vld4lane:
17825 case Intrinsic::arm_neon_vld2dup:
17826 case Intrinsic::arm_neon_vld3dup:
17827 case Intrinsic::arm_neon_vld4dup:
17828 case Intrinsic::arm_neon_vst1:
17829 case Intrinsic::arm_neon_vst1x2:
17830 case Intrinsic::arm_neon_vst1x3:
17831 case Intrinsic::arm_neon_vst1x4:
17832 case Intrinsic::arm_neon_vst2:
17833 case Intrinsic::arm_neon_vst3:
17834 case Intrinsic::arm_neon_vst4:
17835 case Intrinsic::arm_neon_vst2lane:
17836 case Intrinsic::arm_neon_vst3lane:
17837 case Intrinsic::arm_neon_vst4lane:
17839 case Intrinsic::arm_mve_vld2q:
17840 case Intrinsic::arm_mve_vld4q:
17841 case Intrinsic::arm_mve_vst2q:
17842 case Intrinsic::arm_mve_vst4q:
17859 bool *
Fast)
const {
17931 if ((Op.isMemcpy() || Op.isZeroMemset()) && Subtarget->
hasNEON() &&
17934 if (Op.size() >= 16 &&
17935 (Op.isAligned(
Align(16)) ||
17940 }
else if (Op.size() >= 8 &&
17941 (Op.isAligned(
Align(8)) ||
17957 if (!SrcTy->isIntegerTy() || !
DstTy->isIntegerTy())
17959 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
17966 !
DstVT.isInteger())
17978 if (!
VT1.isSimple() || !
VT1.isInteger() ||
17979 !
VT2.isSimple() || !
VT2.isInteger())
17982 switch (
VT1.getSimpleVT().SimpleTy) {
18016 return Ext->getType()->getScalarSizeInBits() ==
18017 2 * Ext->getOperand(0)->getType()->getScalarSizeInBits();
18034 if (!
I->getType()->isVectorTy())
18038 switch (
I->getOpcode()) {
18039 case Instruction::Sub:
18040 case Instruction::Add: {
18043 Ops.push_back(&
I->getOperandUse(0));
18044 Ops.push_back(&
I->getOperandUse(1));
18056 if (!
I->hasOneUse())
18059 return Sub->getOpcode() == Instruction::FSub && Sub->getOperand(1) ==
I;
18069 switch (
I->getOpcode()) {
18070 case Instruction::Add:
18071 case Instruction::Mul:
18072 case Instruction::FAdd:
18073 case Instruction::ICmp:
18076 case Instruction::FMul:
18078 case Instruction::Sub:
18079 case Instruction::FSub:
18080 case Instruction::Shl:
18081 case Instruction::LShr:
18082 case Instruction::AShr:
18083 return Operand == 1;
18084 case Instruction::Call:
18086 switch (II->getIntrinsicID()) {
18087 case Intrinsic::fma:
18089 case Intrinsic::arm_mve_add_predicated:
18090 case Intrinsic::arm_mve_mul_predicated:
18091 case Intrinsic::arm_mve_qadd_predicated:
18092 case Intrinsic::arm_mve_hadd_predicated:
18093 case Intrinsic::arm_mve_vqdmull_predicated:
18094 case Intrinsic::arm_mve_qdmulh_predicated:
18095 case Intrinsic::arm_mve_qrdmulh_predicated:
18096 case Intrinsic::arm_mve_fma_predicated:
18098 case Intrinsic::arm_mve_sub_predicated:
18099 case Intrinsic::arm_mve_qsub_predicated:
18100 case Intrinsic::arm_mve_hsub_predicated:
18101 return Operand == 1;
18112 for (
auto OpIdx :
enumerate(
I->operands())) {
18115 if (!Op ||
any_of(Ops, [&](
Use *U) {
return U->get() == Op; }))
18132 for (
Use &U : Op->uses()) {
18140 Ops.push_back(&Op->getOperandUse(0));
18141 Ops.push_back(&OpIdx.value());
18166 if (
Ld->isExpandingLoad())
18177 if (
ExtVal->use_empty() ||
18190 if (!
Ty1->isIntegerTy() || !
Ty2->isIntegerTy())
18196 assert(
Ty1->getPrimitiveSizeInBits() <= 64 &&
"i128 is probably not a noop");
18206 unsigned AS)
const {
18209 return AM.
Scale < 0 ? 1 : 0;
18227bool ARMTargetLowering::isFMAFasterThanFMulAndFAdd(
const MachineFunction &MF,
18253 unsigned Scale = 1;
18270 if ((V & (Scale - 1)) != 0)
18285 bool IsNeg =
false;
18291 unsigned NumBytes = std::max((
unsigned)VT.
getSizeInBits() / 8, 1U);
18316 if (NumBytes == 1 || NumBytes == 2 || NumBytes == 4) {
18346 default:
return false;
18365 int Scale = AM.
Scale;
18370 default:
return false;
18378 Scale = Scale & ~1;
18379 return Scale == 2 || Scale == 4 || Scale == 8;
18396 if (Scale & 1)
return false;
18403 const int Scale = AM.
Scale;
18413 return (Scale == 1) || (!AM.
HasBaseReg && Scale == 2);
18429 switch (AM.
Scale) {
18446 int Scale = AM.
Scale;
18448 default:
return false;
18452 if (Scale < 0) Scale = -Scale;
18460 if (Scale == 1 || (AM.
HasBaseReg && Scale == -1))
18473 if (Scale & 1)
return false;
18493 return Imm >= 0 && Imm <= 255;
18502 int64_t
AbsImm = std::abs(Imm);
18522 int RHSC = (int)RHS->getZExtValue();
18536 int RHSC = (int)RHS->getZExtValue();
18579 int RHSC = (int)RHS->getZExtValue();
18585 }
else if (
RHSC > 0 &&
RHSC < 0x100) {
18596 bool isSEXTLoad,
bool IsMasked,
bool isLE,
18610 int RHSC = (int)RHS->getZExtValue();
18618 }
else if (
RHSC > 0 &&
RHSC < Limit * Scale &&
RHSC % Scale == 0) {
18635 }
else if (Alignment >= 4 &&
18639 else if (Alignment >= 2 &&
18662 bool isSEXTLoad =
false;
18665 Ptr = LD->getBasePtr();
18666 VT = LD->getMemoryVT();
18667 Alignment = LD->getAlign();
18670 Ptr = ST->getBasePtr();
18671 VT = ST->getMemoryVT();
18672 Alignment = ST->getAlign();
18674 Ptr = LD->getBasePtr();
18675 VT = LD->getMemoryVT();
18676 Alignment = LD->getAlign();
18680 Ptr = ST->getBasePtr();
18681 VT = ST->getMemoryVT();
18682 Alignment = ST->getAlign();
18688 bool isLegal =
false;
18720 bool isSEXTLoad =
false,
isNonExt;
18723 VT = LD->getMemoryVT();
18724 Ptr = LD->getBasePtr();
18725 Alignment = LD->getAlign();
18729 VT = ST->getMemoryVT();
18730 Ptr = ST->getBasePtr();
18731 Alignment = ST->getAlign();
18732 isNonExt = !ST->isTruncatingStore();
18734 VT = LD->getMemoryVT();
18735 Ptr = LD->getBasePtr();
18736 Alignment = LD->getAlign();
18741 VT = ST->getMemoryVT();
18742 Ptr = ST->getBasePtr();
18743 Alignment = ST->getAlign();
18744 isNonExt = !ST->isTruncatingStore();
18752 assert(Op->getValueType(0) ==
MVT::i32 &&
"Non-i32 post-inc op?!");
18756 if (!RHS || RHS->getZExtValue() != 4)
18758 if (Alignment <
Align(4))
18761 Offset = Op->getOperand(1);
18762 Base = Op->getOperand(0);
18768 bool isLegal =
false;
18805 unsigned Depth)
const {
18808 switch (Op.getOpcode()) {
18815 if (Op.getResNo() == 0) {
18816 SDValue LHS = Op.getOperand(0);
18817 SDValue RHS = Op.getOperand(1);
18841 case Intrinsic::arm_ldaex:
18842 case Intrinsic::arm_ldrex: {
18859 Known.
Zero &= Mask;
18871 "VGETLANE index out of bounds");
18876 EVT VT = Op.getValueType();
18933 EVT VT = Op.getValueType();
18946 unsigned Mask =
C->getZExtValue();
18961 return TLO.CombineTo(Op, Op.getOperand(0));
19006 unsigned Depth)
const {
19007 unsigned Opc = Op.getOpcode();
19015 if (Op.getResNo() == 0 && !Op->hasAnyUseOfValue(1) &&
19017 unsigned ShAmt = Op->getConstantOperandVal(2);
19020 return TLO.CombineTo(
19021 Op,
TLO.DAG.getNode(
19048 default:
return false;
19059 if (Ty && Ty->getBitWidth() == 32)
19092 unsigned S = Constraint.
size();
19094 switch (Constraint[0]) {
19106 }
else if (S == 2) {
19107 switch (Constraint[0]) {
19124 Value *CallOperandVal =
info.CallOperandVal;
19127 if (!CallOperandVal)
19136 if (type->isIntegerTy()) {
19144 if (type->isFloatingPointTy())
19151using RCPair = std::pair<unsigned, const TargetRegisterClass *>;
19155 switch (Constraint.
size()) {
19158 switch (Constraint[0]) {
19161 return RCPair(0U, &ARM::tGPRRegClass);
19162 return RCPair(0U, &ARM::GPRRegClass);
19165 return RCPair(0U, &ARM::hGPRRegClass);
19169 return RCPair(0U, &ARM::tGPRRegClass);
19170 return RCPair(0U, &ARM::GPRRegClass);
19175 return RCPair(0U, &ARM::SPRRegClass);
19177 return RCPair(0U, &ARM::DPRRegClass);
19179 return RCPair(0U, &ARM::QPRRegClass);
19185 return RCPair(0U, &ARM::SPR_8RegClass);
19187 return RCPair(0U, &ARM::DPR_8RegClass);
19189 return RCPair(0U, &ARM::QPR_8RegClass);
19195 return RCPair(0U, &ARM::SPRRegClass);
19197 return RCPair(0U, &ARM::DPR_VFP2RegClass);
19199 return RCPair(0U, &ARM::QPR_VFP2RegClass);
19205 if (Constraint[0] ==
'T') {
19206 switch (Constraint[1]) {
19210 return RCPair(0U, &ARM::tGPREvenRegClass);
19212 return RCPair(0U, &ARM::tGPROddRegClass);
19221 if (
StringRef(
"{cc}").equals_insensitive(Constraint))
19222 return std::make_pair(
unsigned(ARM::CPSR), &ARM::CCRRegClass);
19230 std::string &Constraint,
19231 std::vector<SDValue>&Ops,
19236 if (Constraint.length() != 1)
return;
19242 case 'I':
case 'J':
case 'K':
case 'L':
19243 case 'M':
case 'N':
case 'O':
19248 int64_t
CVal64 =
C->getSExtValue();
19249 int CVal = (int)
CVal64;
19260 if (CVal >= 0 && CVal <= 65535)
19267 if (CVal >= 0 && CVal <= 255)
19269 }
else if (Subtarget->
isThumb2()) {
19288 if (CVal >= -255 && CVal <= -1)
19294 if (CVal >= -4095 && CVal <= 4095)
19307 }
else if (Subtarget->
isThumb2()) {
19330 if (CVal >= -7 && CVal < 7)
19332 }
else if (Subtarget->
isThumb2()) {
19355 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
19361 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
19369 if (CVal >= 0 && CVal <= 31)
19378 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
19387 if (Result.getNode()) {
19388 Ops.push_back(Result);
19398 "Unhandled Opcode in getDivRemLibcall");
19404 case MVT::i8:
LC = isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8;
break;
19405 case MVT::i16:
LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16;
break;
19406 case MVT::i32:
LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;
break;
19407 case MVT::i64:
LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64;
break;
19416 "Unhandled Opcode in getDivRemArgList");
19421 for (
unsigned i = 0, e =
N->getNumOperands();
i != e; ++
i) {
19422 EVT ArgVT =
N->getOperand(
i).getValueType();
19424 Entry.Node =
N->getOperand(
i);
19426 Entry.IsSExt = isSigned;
19427 Entry.IsZExt = !isSigned;
19428 Args.push_back(Entry);
19439 "Register-based DivRem lowering only");
19440 unsigned Opcode =
Op->getOpcode();
19442 "Invalid opcode for Div/Rem lowering");
19444 EVT VT =
Op->getValueType(0);
19455 if (
hasDivide &&
Op->getValueType(0).isSimple() &&
19458 const SDValue Dividend =
Op->getOperand(0);
19459 const SDValue Divisor =
Op->getOperand(1);
19464 SDValue Values[2] = {Div, Rem};
19489 std::pair<SDValue, SDValue> CallInfo =
LowerCallTo(CLI);
19490 return CallInfo.first;
19500 switch (
N->getValueType(0).getSimpleVT().SimpleTy) {
19518 bool isSigned =
N->getOpcode() ==
ISD::SREM;
19526 CallLoweringInfo CLI(DAG);
19534 assert(
ResNode->getNumOperands() == 2 &&
"divmod should return two operands");
19535 return ResNode->getOperand(1);
19548 "no-stack-arg-probe")) {
19559 SDValue Ops[2] = { SP, Chain };
19574 Chain =
NewSP.getValue(1);
19583 const unsigned DstSz =
Op.getValueType().getSizeInBits();
19584 const unsigned SrcSz =
SrcVal.getValueType().getSizeInBits();
19586 "Unexpected type for custom-lowering FP_EXTEND");
19589 "With both FP DP and 16, any FP conversion is legal!");
19592 "With FP16, 16 to 32 conversion is legal!");
19616 bool Supported = (
Sz == 16 ? Subtarget->
hasFP16() : Subtarget->
hasFP64());
19623 Chain =
SrcVal.getValue(1);
19629 assert(
LC != RTLIB::UNKNOWN_LIBCALL &&
19630 "Unexpected type for custom-lowering FP_EXTEND");
19645 const unsigned DstSz =
Op.getValueType().getSizeInBits();
19646 const unsigned SrcSz =
SrcVT.getSizeInBits();
19649 "Unexpected type for custom-lowering FP_ROUND");
19652 "With both FP DP and 16, any FP conversion is legal!");
19662 assert(
LC != RTLIB::UNKNOWN_LIBCALL &&
19663 "Unexpected type for custom-lowering FP_ROUND");
19674 assert(
N->getValueType(0) ==
MVT::i64 &&
"Unexpected type (!= i64) on ABS.");
19711 if (
v == 0xffffffff)
19723 bool ForCodeSize)
const {
19744 unsigned Intrinsic)
const {
19745 switch (Intrinsic) {
19746 case Intrinsic::arm_neon_vld1:
19747 case Intrinsic::arm_neon_vld2:
19748 case Intrinsic::arm_neon_vld3:
19749 case Intrinsic::arm_neon_vld4:
19750 case Intrinsic::arm_neon_vld2lane:
19751 case Intrinsic::arm_neon_vld3lane:
19752 case Intrinsic::arm_neon_vld4lane:
19753 case Intrinsic::arm_neon_vld2dup:
19754 case Intrinsic::arm_neon_vld3dup:
19755 case Intrinsic::arm_neon_vld4dup: {
19759 uint64_t
NumElts =
DL.getTypeSizeInBits(
I.getType()) / 64;
19761 Info.ptrVal =
I.getArgOperand(0);
19763 Value *AlignArg =
I.getArgOperand(
I.getNumArgOperands() - 1);
19769 case Intrinsic::arm_neon_vld1x2:
19770 case Intrinsic::arm_neon_vld1x3:
19771 case Intrinsic::arm_neon_vld1x4: {
19775 uint64_t
NumElts =
DL.getTypeSizeInBits(
I.getType()) / 64;
19777 Info.ptrVal =
I.getArgOperand(
I.getNumArgOperands() - 1);
19779 Info.align.reset();
19784 case Intrinsic::arm_neon_vst1:
19785 case Intrinsic::arm_neon_vst2:
19786 case Intrinsic::arm_neon_vst3:
19787 case Intrinsic::arm_neon_vst4:
19788 case Intrinsic::arm_neon_vst2lane:
19789 case Intrinsic::arm_neon_vst3lane:
19790 case Intrinsic::arm_neon_vst4lane: {
19797 if (!
ArgTy->isVectorTy())
19802 Info.ptrVal =
I.getArgOperand(0);
19804 Value *AlignArg =
I.getArgOperand(
I.getNumArgOperands() - 1);
19810 case Intrinsic::arm_neon_vst1x2:
19811 case Intrinsic::arm_neon_vst1x3:
19812 case Intrinsic::arm_neon_vst1x4: {
19819 if (!
ArgTy->isVectorTy())
19824 Info.ptrVal =
I.getArgOperand(0);
19826 Info.align.reset();
19831 case Intrinsic::arm_mve_vld2q:
19832 case Intrinsic::arm_mve_vld4q: {
19836 unsigned Factor = Intrinsic == Intrinsic::arm_mve_vld2q ? 2 : 4;
19838 Info.ptrVal =
I.getArgOperand(0);
19845 case Intrinsic::arm_mve_vst2q:
19846 case Intrinsic::arm_mve_vst4q: {
19849 Type *VecTy =
I.getArgOperand(1)->getType();
19850 unsigned Factor = Intrinsic == Intrinsic::arm_mve_vst2q ? 2 : 4;
19852 Info.ptrVal =
I.getArgOperand(0);
19859 case Intrinsic::arm_mve_vldr_gather_base:
19860 case Intrinsic::arm_mve_vldr_gather_base_predicated: {
19862 Info.ptrVal =
nullptr;
19864 Info.align =
Align(1);
19868 case Intrinsic::arm_mve_vldr_gather_base_wb:
19869 case Intrinsic::arm_mve_vldr_gather_base_wb_predicated: {
19871 Info.ptrVal =
nullptr;
19872 Info.memVT =
MVT::getVT(
I.getType()->getContainedType(0));
19873 Info.align =
Align(1);
19877 case Intrinsic::arm_mve_vldr_gather_offset:
19878 case Intrinsic::arm_mve_vldr_gather_offset_predicated: {
19880 Info.ptrVal =
nullptr;
19884 DataVT.getVectorNumElements());
19885 Info.align =
Align(1);
19889 case Intrinsic::arm_mve_vstr_scatter_base:
19890 case Intrinsic::arm_mve_vstr_scatter_base_predicated: {
19892 Info.ptrVal =
nullptr;
19893 Info.memVT =
MVT::getVT(
I.getArgOperand(2)->getType());
19894 Info.align =
Align(1);
19898 case Intrinsic::arm_mve_vstr_scatter_base_wb:
19899 case Intrinsic::arm_mve_vstr_scatter_base_wb_predicated: {
19901 Info.ptrVal =
nullptr;
19902 Info.memVT =
MVT::getVT(
I.getArgOperand(2)->getType());
19903 Info.align =
Align(1);
19907 case Intrinsic::arm_mve_vstr_scatter_offset:
19908 case Intrinsic::arm_mve_vstr_scatter_offset_predicated: {
19910 Info.ptrVal =
nullptr;
19914 DataVT.getVectorNumElements());
19915 Info.align =
Align(1);
19919 case Intrinsic::arm_ldaex:
19920 case Intrinsic::arm_ldrex: {
19925 Info.ptrVal =
I.getArgOperand(0);
19931 case Intrinsic::arm_stlex:
19932 case Intrinsic::arm_strex: {
19937 Info.ptrVal =
I.getArgOperand(1);
19943 case Intrinsic::arm_stlexd:
19944 case Intrinsic::arm_strexd:
19947 Info.ptrVal =
I.getArgOperand(2);
19949 Info.align =
Align(8);
19953 case Intrinsic::arm_ldaexd:
19954 case Intrinsic::arm_ldrexd:
19957 Info.ptrVal =
I.getArgOperand(0);
19959 Info.align =
Align(8);
19974 assert(Ty->isIntegerTy());
19976 unsigned Bits = Ty->getPrimitiveSizeInBits();
19977 if (Bits == 0 || Bits > 32)
19983 unsigned Index)
const {
19987 return (Index == 0 || Index ==
ResVT.getVectorNumElements());
19992 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
20001 Value* args[6] = {Builder.getInt32(15), Builder.getInt32(0),
20002 Builder.getInt32(0), Builder.getInt32(7),
20003 Builder.getInt32(10), Builder.getInt32(5)};
20004 return Builder.CreateCall(
MCR, args);
20068 unsigned Size =
SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
20128 return InsertFencesForAtomic;
20141 M.getOrInsertGlobal(
"__security_cookie",
20149 F->addAttribute(1, Attribute::AttrKind::InReg);
20155 return M.getGlobalVariable(
"__security_cookie");
20167 unsigned &Cost)
const {
20210 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
20216 if (ValueTy->getPrimitiveSizeInBits() == 64) {
20218 IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
20224 Value *Lo = Builder.CreateExtractValue(
LoHi, 0,
"lo");
20225 Value *Hi = Builder.CreateExtractValue(
LoHi, 1,
"hi");
20228 Lo = Builder.CreateZExt(Lo, ValueTy,
"lo64");
20229 Hi = Builder.CreateZExt(Hi, ValueTy,
"hi64");
20230 return Builder.CreateOr(
20238 return Builder.CreateTruncOrBitCast(Builder.CreateCall(
Ldrex,
Addr), ValueTy);
20245 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
20252 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
20260 IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
20265 Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32),
Int32Ty,
"hi");
20269 return Builder.CreateCall(
Strex, {Lo, Hi,
Addr});
20276 return Builder.CreateCall(
20277 Strex, {Builder.CreateZExtOrBitCast(
20278 Val,
Strex->getFunctionType()->getParamType(0)),
20292 return (
DL.getTypeSizeInBits(VecTy) + 127) / 128;
20299 unsigned VecSize =
DL.getTypeSizeInBits(VecTy);
20354 "Invalid interleave factor");
20355 assert(!Shuffles.empty() &&
"Empty shufflevector input");
20356 assert(Shuffles.size() == Indices.size() &&
20357 "Unmatched number of shufflevectors and indices");
20360 Type *EltTy = VecTy->getElementType();
20383 if (NumLoads > 1) {
20387 VecTy->getNumElements() / NumLoads);
20392 BaseAddr = Builder.CreateBitCast(
20402 Type *
Tys[] = {VecTy, Int8Ptr};
20404 Intrinsic::arm_neon_vld3,
20405 Intrinsic::arm_neon_vld4};
20410 Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
20413 return Builder.CreateCall(
VldnFunc, Ops,
"vldN");
20415 assert((Factor == 2 || Factor == 4) &&
20416 "expected interleave factor of 2 or 4 for MVE");
20418 Factor == 2 ? Intrinsic::arm_mve_vld2q : Intrinsic::arm_mve_vld4q;
20426 Ops.push_back(Builder.CreateBitCast(BaseAddr,
VecEltTy));
20427 return Builder.CreateCall(
VldnFunc, Ops,
"vldN");
20440 BaseAddr = Builder.CreateConstGEP1_32(VecTy->getElementType(), BaseAddr,
20441 VecTy->getNumElements() * Factor);
20447 for (
unsigned i = 0;
i < Shuffles.size();
i++) {
20449 unsigned Index = Indices[
i];
20455 SubVec = Builder.CreateIntToPtr(
20471 SVI->replaceAllUsesWith(
WideVec);
20505 unsigned Factor)
const {
20507 "Invalid interleave factor");
20510 assert(VecTy->getNumElements() % Factor == 0 &&
"Invalid interleaved store");
20512 unsigned LaneLen = VecTy->getNumElements() / Factor;
20513 Type *EltTy = VecTy->getElementType();
20517 Align Alignment =
SI->getAlign();
20534 Type *IntTy =
DL.getIntPtrType(EltTy);
20539 Op0 = Builder.CreatePtrToInt(Op0,
IntVecTy);
20540 Op1 = Builder.CreatePtrToInt(Op1,
IntVecTy);
20546 Value *BaseAddr =
SI->getPointerOperand();
20548 if (NumStores > 1) {
20557 BaseAddr = Builder.CreateBitCast(
20559 SubVecTy->getElementType()->getPointerTo(
SI->getPointerAddressSpace()));
20570 Intrinsic::arm_neon_vst3,
20571 Intrinsic::arm_neon_vst4};
20579 Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
20581 Ops.push_back(Builder.getInt32(
SI->getAlignment()));
20582 Builder.CreateCall(
VstNFunc, Ops);
20584 assert((Factor == 2 || Factor == 4) &&
20585 "expected interleave factor of 2 or 4 for MVE");
20587 Factor == 2 ? Intrinsic::arm_mve_vst2q : Intrinsic::arm_mve_vst4q;
20589 SI->getPointerAddressSpace());
20595 Ops.push_back(Builder.CreateBitCast(BaseAddr,
EltPtrTy));
20597 for (
unsigned F = 0;
F < Factor;
F++) {
20598 Ops.push_back(Builder.getInt32(
F));
20599 Builder.CreateCall(
VstNFunc, Ops);
20605 for (
unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {
20608 if (StoreCount > 0)
20609 BaseAddr = Builder.CreateConstGEP1_32(
SubVecTy->getElementType(),
20615 for (
unsigned i = 0;
i < Factor;
i++) {
20617 if (Mask[
IdxI] >= 0) {
20618 Shuffles.push_back(Builder.CreateShuffleVector(
20624 if (Mask[
IdxJ * Factor +
IdxI] >= 0) {
20635 Shuffles.push_back(Builder.CreateShuffleVector(
20654 uint64_t &Members) {
20656 for (
unsigned i = 0;
i < ST->getNumElements(); ++
i) {
20666 Members +=
SubMembers * AT->getNumElements();
20667 }
else if (Ty->isFloatTy()) {
20672 }
else if (Ty->isDoubleTy()) {
20684 return VT->getPrimitiveSizeInBits().getFixedSize() == 64;
20686 return VT->getPrimitiveSizeInBits().getFixedSize() == 128;
20688 switch (VT->getPrimitiveSizeInBits().getFixedSize()) {
20701 return (Members > 0 && Members <= 4);
20708 if (!
ArgTy->isVectorTy())
20722 if (getEffectiveCallingConv(CallConv, isVarArg) !=
20727 uint64_t Members = 0;
20731 bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy();
20736 const Constant *PersonalityFn)
const {
20743 const Constant *PersonalityFn)
const {
20755void ARMTargetLowering::insertCopiesSplitCSR(
20769 RC = &ARM::GPRRegClass;
20770 else if (ARM::DPRRegClass.
contains(*
I))
20771 RC = &ARM::DPRRegClass;
20781 assert(Entry->getParent()->getFunction().hasFnAttribute(
20782 Attribute::NoUnwind) &&
20783 "Function should be nounwind in insertCopiesSplitCSR!");
20784 Entry->addLiveIn(*
I);
20789 for (
auto *Exit : Exits)
unsigned const MachineRegisterInfo * MRI
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, int64_t &Cnt)
isVShiftRImm - Check if this is a valid build_vector for the immediate operand of a vector shift righ...
static bool areExtractExts(Value *Ext1, Value *Ext2)
Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth of the vector elements.
static EVT getExtensionTo64Bits(const EVT &OrigVT)
static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG, bool isSigned)
static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG)
static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG)
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt)
getVShiftImm - Check if this is a valid build_vector for the immediate operand of a vector shift oper...
static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG)
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, SDValue RHS, SelectionDAG &DAG, const SDLoc &dl)
GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit the specified operations t...
static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V)
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt)
isVShiftLImm - Check if this is a valid build_vector for the immediate operand of a vector shift left...
static bool isSignExtended(SDNode *N, SelectionDAG &DAG)
static bool isZeroExtended(SDNode *N, SelectionDAG &DAG)
static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls)
Return true if the calling convention is one that we can guarantee TCO for.
static const unsigned PerfectShuffleTable[6561+1]
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
amdgpu aa AMDGPU Address space based Alias Analysis Wrapper
static bool isConstant(const MachineInstr &MI)
amdgpu Simplify well known AMD library false FunctionCallee Callee
amdgpu Simplify well known AMD library false FunctionCallee Value * Arg
This file declares a class to represent arbitrary precision floating point values and provide a varie...
This file implements a class to represent arbitrary precision integral constant values and operations...
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG)
static bool isStore(int Opcode)
static bool isThumb(const MCSubtargetInfo &STI)
static SDValue PerformExtractEltToVMOVRRD(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, MachineFrameInfo &MFI, const MachineRegisterInfo *MRI, const TargetInstrInfo *TII)
MatchingStackOffset - Return true if the given stack call argument is already available in the same p...
static SDValue PerformVQDMULHCombine(SDNode *N, SelectionDAG &DAG)
static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op, SelectionDAG &DAG)
static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerShift(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue FlattenVectorShuffle(ShuffleVectorSDNode *N, SelectionDAG &DAG)
static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG, const EVT &OrigTy, const EVT &ExtTy, unsigned ExtOpcode)
AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total value size to 64 bits.
static cl::opt< unsigned > ConstpoolPromotionMaxSize("arm-promote-constant-max-size", cl::Hidden, cl::desc("Maximum size of constant to promote into a constant pool"), cl::init(64))
static bool isZeroOrAllOnes(SDValue N, bool AllOnes)
static SDValue LowerINSERT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static bool isVTBLMask(ArrayRef< int > M, EVT VT)
static SDValue PerformSUBCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
static cl::opt< bool > EnableConstpoolPromotion("arm-promote-constant", cl::Hidden, cl::desc("Enable / disable promotion of unnamed_addr constants into " "constant pools"), cl::init(false))
static SDValue PerformExtractFpToIntStores(StoreSDNode *St, SelectionDAG &DAG)
static SDValue PerformVDUPCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP.
static SDValue PerformExtractEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *ST)
static const APInt * isPowerOf2Constant(SDValue V)
static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD) can replace combinations of ...
static SDValue PerformVMOVhrCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG)
static SDValue LowerVECTOR_SHUFFLEUsingOneOff(SDValue Op, ArrayRef< int > ShuffleMask, SelectionDAG &DAG)
static bool isValidMVECond(unsigned CC, bool IsFloat)
static SDValue PerformPREDICATE_CASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC)
IntCCToARMCC - Convert a DAG integer condition code to an ARM CC.
static SDValue PerformSTORECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformSTORECombine - Target-specific dag combine xforms for ISD::STORE.
static SDValue ConvertBooleanCarryToCarryFlag(SDValue BoolCarry, SelectionDAG &DAG)
static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static bool isGTorGE(ISD::CondCode CC)
static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a vldN-lane (N > 1) intrinsic,...
static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask)
static bool isReverseMask(ArrayRef< int > M, EVT VT)
static bool isVZIP_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
static SDValue PerformSELECTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue PerformVECTOR_REG_CASTCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG)
static bool isVTRNMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static bool CanInvertMVEVCMP(SDValue N)
static SDValue PerformLongShiftCombine(SDNode *N, SelectionDAG &DAG)
static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue PerformShiftCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *ST)
PerformShiftCombine - Checks for immediate versions of vector shifts and lowers them.
static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode, ARMCC::CondCodes &CondCode2)
FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
static EVT getVectorTyFromPredicateVector(EVT VT)
static SDValue PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
Target-specific dag combine xforms for ARMISD::BUILD_VECTOR.
static bool isSRL16(const SDValue &Op)
static SDValue PerformVMOVrhCombine(SDNode *N, SelectionDAG &DAG)
static SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes, TargetLowering::DAGCombinerInfo &DCI)
static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static Register genTPEntry(MachineBasicBlock *TpEntry, MachineBasicBlock *TpLoopBody, MachineBasicBlock *TpExit, Register OpSizeReg, const TargetInstrInfo *TII, DebugLoc Dl, MachineRegisterInfo &MRI)
Adds logic in loop entry MBB to calculate loop iteration count and adds t2WhileLoopSetup and t2WhileL...
static bool isLTorLE(ISD::CondCode CC)
static SDValue PerformVCMPCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue PerformMVEVMULLCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl, SelectionDAG &DAG)
static SDValue PerformBITCASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *ST)
static SDValue AddCombineTo64bitMLAL(SDNode *AddeSubeNode, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG)
static bool checkAndUpdateCPSRKill(MachineBasicBlock::iterator SelectItr, MachineBasicBlock *BB, const TargetRegisterInfo *TRI)
static bool hasNormalLoadOperand(SDNode *N)
hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node are normal, non-volatile loads.
static SDValue PerformInsertEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
PerformInsertEltCombine - Target-specific dag combine xforms for ISD::INSERT_VECTOR_ELT.
static SDValue PerformVDUPLANECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformVDUPLANECombine - Target-specific dag combine xforms for ARMISD::VDUPLANE.
static SDValue LowerBuildVectorOfFPTrunc(SDValue BV, SelectionDAG &DAG, const ARMSubtarget *ST)
static cl::opt< unsigned > ConstpoolPromotionMaxTotal("arm-promote-constant-max-total", cl::Hidden, cl::desc("Maximum size of ALL constants to promote into a constant pool"), cl::init(128))
static SDValue LowerTruncatei1(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
static RTLIB::Libcall getDivRemLibcall(const SDNode *N, MVT::SimpleValueType SVT)
static SDValue PerformABSCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG &DAG)
SkipLoadExtensionForVMULL - return a load of the original vector size that does not do any sign/zero extension.
static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static const MCPhysReg GPRArgRegs[]
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombineWithOperands - Try DAG combinations for an ADD with operands N0 and N1.
static SDValue PromoteMVEPredVector(SDLoc dl, SDValue Pred, EVT VT, SelectionDAG &DAG)
static bool isVZIPMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
static SDValue PerformORCombineToSMULWBT(SDNode *OR, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static bool isVTRN_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue FindBFIToCombineWith(SDNode *N)
static SDValue LowerADDSUBSAT(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue ConvertCarryFlagToBooleanCarry(SDValue Flags, EVT VT, SelectionDAG &DAG)
static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode, bool &swpCmpOps, bool &swpVselOps)
static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
static bool isS16(const SDValue &Op, SelectionDAG &DAG)
static bool isSRA16(const SDValue &Op)
static SDValue AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue LowerVECTOR_SHUFFLEUsingMovs(SDValue Op, ArrayRef< int > ShuffleMask, SelectionDAG &DAG)
static SDValue LowerInterruptReturn(SmallVectorImpl< SDValue > &RetOps, const SDLoc &DL, SelectionDAG &DAG)
static SDValue LowerEXTRACT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl, SelectionDAG &DAG)
static void expandf64Toi32(SDValue Op, SelectionDAG &DAG, SDValue &RetVal1, SDValue &RetVal2)
static SDValue LowerCONCAT_VECTORS_i1(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue PerformVLDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static bool isSHL16(const SDValue &Op)
static bool isVEXTMask(ArrayRef< int > M, EVT VT, bool &ReverseVEXT, unsigned &Imm)
static SDValue PerformMVEVLDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue PerformADDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2)
Return the load opcode for a given load size.
static bool isLegalT2AddressImmediate(int64_t V, EVT VT, const ARMSubtarget *Subtarget)
static bool isLegalMVEShuffleOp(unsigned PFEntry)
static SDValue PerformSignExtendInregCombine(SDNode *N, SelectionDAG &DAG)
static SDValue PerformShuffleVMOVNCombine(ShuffleVectorSDNode *N, SelectionDAG &DAG)
static bool isVUZPMask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG)
PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for ISD::VECTOR_SHUFFLE.
static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG)
SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND, ANY_EXTEND,...
static bool isVMOVNTruncMask(ArrayRef< int > M, EVT ToVT, bool rev)
static SDValue PerformVQMOVNCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static MachineBasicBlock * OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ)
static SDValue PerformFPExtendCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue PerformAddcSubcCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue PerformVSELECTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static TargetLowering::ArgListTy getDivRemArgList(const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget)
static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl)
getZeroVector - Returns a vector of specified type with all zero elements.
static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG)
static SDValue PerformSplittingToNarrowingStores(StoreSDNode *St, SelectionDAG &DAG)
static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT, bool isSEXTLoad, SDValue &Base, SDValue &Offset, bool &isInc, SelectionDAG &DAG)
static ARMCC::CondCodes getVCMPCondCode(SDValue N)
static cl::opt< bool > ARMInterworking("arm-interworking", cl::Hidden, cl::desc("Enable / disable ARM interworking (for debugging only)"), cl::init(true))
static void ReplaceREADCYCLECOUNTER(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue PerformORCombineToBFI(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes, SDValue &CC, bool &Invert, SDValue &OtherOp, SelectionDAG &DAG)
static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerBUILD_VECTORToVIDUP(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue PerformLOADCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static bool isZeroVector(SDValue N)
static SDValue PerformAddeSubeCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static void ReplaceCMP_SWAP_64Results(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG)
static bool isLowerSaturate(const SDValue LHS, const SDValue RHS, const SDValue TrueVal, const SDValue FalseVal, const ISD::CondCode CC, const SDValue K)
static SDValue LowerPredicateLoad(SDValue Op, SelectionDAG &DAG)
static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, const TargetInstrInfo *TII, const DebugLoc &dl, unsigned StSize, unsigned Data, unsigned AddrIn, unsigned AddrOut, bool IsThumb1, bool IsThumb2)
Emit a post-increment store operation with given size.
static bool isVMOVNMask(ArrayRef< int > M, EVT VT, bool Top, bool SingleSource)
static SDValue CombineBaseUpdate(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
CombineBaseUpdate - Target-specific DAG combine function for VLDDUP, NEON load/store intrinsics,...
static SDValue LowerSaturatingConditional(SDValue Op, SelectionDAG &DAG)
static SDValue PerformSubCSINCCombine(SDNode *N, SelectionDAG &DAG)
static SDValue PerformVMOVRRDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformVMOVRRDCombine - Target-specific dag combine xforms for ARMISD::VMOVRRD.
static SDValue PerformVMOVNCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI)
static SDValue LowerVectorExtend(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N, SDValue InChain)
static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op, ArrayRef< int > ShuffleMask, SelectionDAG &DAG)
static SDValue PerformVMULCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformVMULCombine - Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the special multiplier accumulator forwarding (vmul + vadd -> vmla, vmul + vsub -> vmls).
static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG)
static SDValue PerformBFICombine(SDNode *N, SelectionDAG &DAG)
static SDValue PerformORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformORCombine - Target-specific dag combine xforms for ISD::OR.
static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG)
static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG)
static SDValue PerformTruncatingStoreCombine(StoreSDNode *St, SelectionDAG &DAG)
static unsigned SelectPairHalf(unsigned Elements, ArrayRef< int > Mask, unsigned Index)
static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos, const TargetInstrInfo *TII, const DebugLoc &dl, unsigned LdSize, unsigned Data, unsigned AddrIn, unsigned AddrOut, bool IsThumb1, bool IsThumb2)
Emit a post-increment load operation with given size.
static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG, const ARMSubtarget *ST, const SDLoc &dl)
static SDValue PerformXORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static bool getMVEIndexedAddressParts(SDNode *Ptr, EVT VT, Align Alignment, bool isSEXTLoad, bool IsMasked, bool isLE, SDValue &Base, SDValue &Offset, bool &isInc, SelectionDAG &DAG)
std::pair< unsigned, const TargetRegisterClass * > RCPair
static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, TargetLowering::DAGCombinerInfo &DCI, bool AllOnes=false)
static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND, ISD::ZERO_EXTEND,...
static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
cl::opt< unsigned > MVEMaxSupportedInterleaveFactor("mve-max-interleave-factor", cl::Hidden, cl::desc("Maximum interleave factor for MVE VLDn to generate."), cl::init(2))
static SDValue isVMOVModifiedImm(uint64_t SplatBits, uint64_t SplatUndef, unsigned SplatBitSize, SelectionDAG &DAG, const SDLoc &dl, EVT &VT, EVT VectorVT, VMOVModImmType type)
isVMOVModifiedImm - Check if the specified splat value corresponds to a valid vector constant for a NEON or MVE instruction with a "modified immediate" operand (e.g., VMOV).
static SDValue LowerBuildVectorOfFPExt(SDValue BV, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC, SelectionDAG &DAG)
BC is a bitcast that is about to be turned into a VMOVDRR.
static SDValue promoteToConstantPool(const ARMTargetLowering *TLI, const GlobalValue *GV, SelectionDAG &DAG, EVT PtrVT, const SDLoc &dl)
static unsigned isNEONTwoResultShuffleMask(ArrayRef< int > ShuffleMask, EVT VT, unsigned &WhichResult, bool &isV_UNDEF)
Check if ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN), and return the corresponding ARM opcode if it is, or 0 if it is not such a shuffle.
static bool BitsProperlyConcatenate(const APInt &A, const APInt &B)
static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT, bool isSEXTLoad, SDValue &Base, SDValue &Offset, bool &isInc, SelectionDAG &DAG)
static SDValue LowerVecReduce(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG)
static bool allUsersAreInFunction(const Value *V, const Function *F)
Return true if all users of V are within function F, looking through ConstantExprs.
static bool isSingletonVEXTMask(ArrayRef< int > M, EVT VT, unsigned &Imm)
static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG)
PerformVMOVDRRCombine - Target-specific dag combine xforms for ARMISD::VMOVDRR.
static bool isLowerSaturatingConditional(const SDValue &Op, SDValue &V, SDValue &SatK)
static bool isLegalAddressImmediate(int64_t V, EVT VT, const ARMSubtarget *Subtarget)
isLegalAddressImmediate - Return true if the integer value can be used as the offset of the target ad...
static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static bool isLegalT1AddressImmediate(int64_t V, EVT VT)
static SDValue CombineANDShift(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG)
static SDValue PerformSHLSimplify(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *ST)
static SDValue PerformADDECombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformADDECombine - Target-specific dag combine transform from ARMISD::ADDC, ARMISD::ADDE,...
static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue LowerTruncate(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue PerformHWLoopCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *ST)
static SDValue PerformSplittingMVETruncToNarrowingStores(StoreSDNode *St, SelectionDAG &DAG)
static bool isVUZP_v_undef_Mask(ArrayRef< int > M, EVT VT, unsigned &WhichResult)
isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base, uint64_t &Members)
static SDValue PerformMULCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD) can replace combinations of ...
static SDValue PerformANDCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static SDValue PerformADDVecReduce(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue LowerPredicateStore(SDValue Op, SelectionDAG &DAG)
static SDValue SearchLoopIntrinsic(SDValue N, ISD::CondCode &CC, int &Imm, bool &Negate)
static bool canChangeToInt(SDValue Op, bool &SeenZero, const ARMSubtarget *Subtarget)
canChangeToInt - Given the fp compare operand, return true if it is suitable to morph to an integer compare sequence.
static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2)
Return the store opcode for a given store size.
static bool IsVUZPShuffleNode(SDNode *N)
static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget, MachineInstr &MI, const SDNode *Node)
Attaches vregs to MEMCPY that it will use as scratch registers when it is expanded into LDM/STM.
static bool isFloatingPointZero(SDValue Op)
isFloatingPointZero - Return true if this is +0.0.
static SDValue findMUL_LOHI(SDValue V)
static SDValue LowerVECTOR_SHUFFLE_i1(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue PerformORCombine_i1(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *Subtarget)
static SDValue PerformSplittingMVEEXTToWideningLoad(SDNode *N, SelectionDAG &DAG)
static SDValue PerformSplittingToWideningLoad(SDNode *N, SelectionDAG &DAG)
static void genTPLoopBody(MachineBasicBlock *TpLoopBody, MachineBasicBlock *TpEntry, MachineBasicBlock *TpExit, const TargetInstrInfo *TII, DebugLoc Dl, MachineRegisterInfo &MRI, Register OpSrcReg, Register OpDestReg, Register ElementCountReg, Register TotalIterationsReg, bool IsMemcpy)
Adds logic in the loopBody MBB to generate MVE_VCTP, t2DoLoopDec and t2DoLoopEnd.
static SDValue PerformBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, const ARMSubtarget *Subtarget)
PerformBUILD_VECTORCombine - Target-specific dag combine xforms for ISD::BUILD_VECTOR.
static SDValue LowerVecReduceF(SDValue Op, SelectionDAG &DAG, const ARMSubtarget *ST)
static SDValue PerformMinMaxCombine(SDNode *N, SelectionDAG &DAG, const ARMSubtarget *ST)
PerformMinMaxCombine - Target-specific DAG combining for creating truncating saturates.
Function Alias Analysis Results
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
SmallVector< MachineOperand, 4 > Cond
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< ShadowStackGC > C("shadow-stack", "Very portable GC for uncooperative code generators")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static Optional< bool > isBigEndian(const SmallDenseMap< int64_t, int64_t, 8 > &MemOffset2Idx, int64_t LowestIdx)
Given a map from byte offsets in memory to indices in a load/store, determine if that map corresponds...
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx. Should correspond to the result type of an ExtractValue instruction executed with just that one Idx.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static Function * getFunction(Constant *C)
const HexagonInstrInfo * TII
std::pair< Value *, Value * > ShuffleOps
We are building a shuffle to create V, which is a sequence of insertelement, extractelement pairs.
static Value * LowerCTPOP(LLVMContext &Context, Value *V, Instruction *IP)
Emit the code to lower ctpop of V before the specified instruction IP.
Loop Strength Reduction
static M68kRelType getType(unsigned Kind, MCSymbolRefExpr::VariantKind &Modifier, bool &IsPCRel)
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first found DebugLoc that has a DILocation, given a range of instructions.
unsigned const TargetRegisterInfo * TRI
Promote Memory to Register
Module.h This file contains the declarations for the Module class.
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
StandardInstrumentations SI(Debug, VerifyEach)
const char LLVMTargetMachineRef LLVMPassBuilderOptionsRef Options
const char LLVMTargetMachineRef TM
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
separate const offset from Split GEPs to a variadic base and a constant offset for better CSE
#define STATISTIC(VARNAME, DESC)
This file describes how to lower LLVM code to machine code.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
static X86::CondCode getSwappedCondition(X86::CondCode CC)
Assuming the flags are set by MI(a,b), return the condition code if we modify the instructions such t...
static constexpr int Concat[]
Class for arbitrary precision integers.
uint64_t getZExtValue() const
Get zero extended value.
bool sgt(const APInt &RHS) const
Signed greater than comparison.
static APInt getAllOnesValue(unsigned numBits)
Get the all-ones value.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSplat(unsigned NewLen, const APInt &V)
Return a value containing V broadcasted over NewLen bits.
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
bool isAllOnesValue() const
Determine if all bits are set.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Get a value with low bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Get a value with high bits set.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
int64_t getSExtValue() const
Get sign extended value.
virtual const ARMBaseRegisterInfo & getRegisterInfo() const =0
const uint32_t * getSjLjDispatchPreservedMask(const MachineFunction &MF) const
const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const override
Code Generation virtual methods...
static ARMConstantPoolConstant * Create(const Constant *C, unsigned ID)
static ARMConstantPoolMBB * Create(LLVMContext &C, const MachineBasicBlock *mbb, unsigned ID, unsigned char PCAdj)
static ARMConstantPoolSymbol * Create(LLVMContext &C, StringRef s, unsigned ID, unsigned char PCAdj)
ARMConstantPoolValue - ARM specific constantpool value.
ARMFunctionInfo - This class is derived from MachineFunctionInfo and contains private ARM-specific in...
int getVarArgsFrameIndex() const
int getPromotedConstpoolIncrease() const
SmallPtrSet< const GlobalVariable *, 2 > & getGlobalsPromotedToConstantPool()
void setArgumentStackToRestore(unsigned v)
unsigned createPICLabelUId()
void setPromotedConstpoolIncrease(int Sz)
bool isThumb1OnlyFunction() const
void setArgRegsSaveSize(unsigned s)
bool isCmseNSEntryFunction() const
void setReturnRegsCount(unsigned s)
void setVarArgsFrameIndex(int Index)
void markGlobalAsPromotedToConstantPool(const GlobalVariable *GV)
Indicate to the backend that GV has had its storage changed to inside a constant pool.
void setIsSplitCSR(bool s)
void setArgumentStackSize(unsigned size)
bool isTargetMachO() const
bool hasVMLxForwarding() const
bool hasRetAddrStack() const
bool isTargetAEABI() const
bool supportsTailCall() const
const Triple & getTargetTriple() const
const ARMBaseInstrInfo * getInstrInfo() const override
bool isThumb1Only() const
bool hasFPARMv8Base() const
bool isTargetWindows() const
bool isGVIndirectSymbol(const GlobalValue *GV) const
True if the GV will be accessed via an indirect symbol.
const ARMTargetLowering * getTargetLowering() const override
bool hasDivideInThumbMode() const
bool isTargetDarwin() const
const ARMBaseRegisterInfo * getRegisterInfo() const override
bool isTargetAndroid() const
bool isTargetCOFF() const
bool isTargetGNUAEABI() const
bool hasV8_1MMainlineOps() const
bool isTargetWatchOS() const
bool preferISHSTBarriers() const
bool genLongCalls() const
bool isFPBrccSlow() const
bool useNEONForSinglePrecisionFP() const
const InstrItineraryData * getInstrItineraryData() const override
getInstrItins - Return the instruction itineraries based on subtarget selection.
bool isTargetWatchABI() const
bool hasDataBarrier() const
bool hasAnyDataBarrier() const
bool allowsUnalignedMem() const
bool isTargetMuslAEABI() const
bool useSoftFloat() const
bool hasMPExtension() const
bool hasMVEFloatOps() const
bool hasDivideInARMMode() const
unsigned getPrefLoopLogAlignment() const
bool isTargetHardFloat() const
bool hasV8MBaselineOps() const
bool hasMVEIntegerOps() const
bool hasAcquireRelease() const
bool genExecuteOnly() const
bool isReadOnly(const GlobalValue *GV) const
bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize, unsigned &PrefAlign) const override
Return true if the pointer arguments to CI should be aligned by aligning the object whose address is ...
unsigned getMaxSupportedInterleaveFactor() const override
Get the maximum supported factor for interleaved memory accesses.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override
Returns how the given (atomic) load should be expanded by the IR-level AtomicExpand pass.
unsigned getNumInterleavedAccesses(VectorType *VecTy, const DataLayout &DL) const
Returns the number of interleaved accesses that will be generated when lowering accesses of the given...
bool shouldInsertFencesForAtomic(const Instruction *I) const override
Whether AtomicExpandPass should automatically insert fences and reduce ordering for this atomic.
Align getABIAlignmentForCallingConv(Type *ArgTy, const DataLayout &DL) const override
Return the correct alignment for the current calling convention.
void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
LowerAsmOperandForConstraint - Lower the specified operand into the Ops vector.
bool isDesirableToCommuteWithShift(const SDNode *N, CombineLevel Level) const override
Return true if it is profitable to move this shift by a constant amount though its operand,...
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
const ARMSubtarget * getSubtarget() const
bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const
bool isLegalT1ScaledAddressingMode(const AddrMode &AM, EVT VT) const
Returns true if the addressing mode representing by AM is legal for the Thumb1 target,...
bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPreIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mod...
bool shouldSinkOperands(Instruction *I, SmallVectorImpl< Use * > &Ops) const override
Check if sinking I's operands to I's basic block is profitable, because the operands can be folded in...
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const override
bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override
Return true if SHIFT instructions should be expanded to SHIFT_PARTS instructions, and false if a libr...
bool isLegalAddImmediate(int64_t Imm) const override
isLegalAddImmediate - Return true if the specified immediate is legal add immediate,...
bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override
Returns true if the given (atomic) store should be expanded by the IR-level AtomicExpand pass into an...
Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
bool isFNegFree(EVT VT) const override
Return true if an fneg operation is free to the point where it is never worthwhile to replace it with...
void finalizeLowering(MachineFunction &MF) const override
Execute target specific actions to finalize target lowering.
SDValue PerformMVETruncCombine(SDNode *N, DAGCombinerInfo &DCI) const
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize=false) const override
isFPImmLegal - Returns true if the target can instruction select the specified FP immediate natively.
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint letter, return the type of constraint it is for this target.
bool preferIncOfAddToSubOfNot(EVT VT) const override
These two forms are equivalent: sub y, (xor x, -1) add (add x, 1), y The variant with two add's is IR...
Function * getSSPStackGuardCheck(const Module &M) const override
If the target has a standard stack protection check function that performs validation and error handl...
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
void insertSSPDeclarations(Module &M) const override
Inserts necessary declarations for SSP (stack protection) purpose.
SDValue PerformIntrinsicCombine(SDNode *N, DAGCombinerInfo &DCI) const
PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags, bool *Fast) const override
allowsMisalignedMemoryAccesses - Returns true if the target allows unaligned memory accesses of the s...
bool ExpandInlineAsm(CallInst *CI) const override
This hook allows the target to expand an inline asm call to be explicit llvm code if it wants to.
SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const
PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
Value * getSDagStackGuard(const Module &M) const override
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
SDValue PerformMVEExtCombine(SDNode *N, DAGCombinerInfo &DCI) const
bool shouldFoldConstantShiftPairToMask(const SDNode *N, CombineLevel Level) const override
Return true if it is profitable to fold a pair of shifts into a mask.
bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &OriginalDemandedBits, const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const override
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the value type to use for ISD::SETCC.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
Value * emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const override
Perform a store-conditional operation to Addr.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
This callback is invoked for operations that are unsupported by the target, which are registered to u...
CCAssignFn * CCAssignFnForReturn(CallingConv::ID CC, bool isVarArg) const
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo) const override
createFastISel - This method returns a target specific FastISel object, or null if the target does no...
void AdjustInstrPostInstrSelection(MachineInstr &MI, SDNode *Node) const override
This method should be implemented by targets that mark instructions with the 'hasPostISelHook' flag.
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, unsigned Index) const override
Return true if EXTRACT_SUBVECTOR is cheap for this result type with this index.
bool isCheapToSpeculateCttz() const override
Return true if it is cheap to speculate a call to intrinsic cttz.
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
getTgtMemIntrinsic - Represent NEON load and store intrinsics as MemIntrinsicNodes.
bool isTruncateFree(Type *SrcTy, Type *DstTy) const override
Return true if it's free to truncate a value of type FromTy to type ToTy.
bool isShuffleMaskLegal(ArrayRef< int > M, EVT VT) const override
isShuffleMaskLegal - Targets can use this to indicate that they only support some VECTOR_SHUFFLE oper...
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself.
bool useLoadStackGuardNode() const override
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const override
getRegClassFor - Return the register class that should be used for the specified value type.
std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const override
Return the largest legal super-reg register class of the register class for the specified type and it...
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, TargetLoweringOpt &TLO) const override
bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI, unsigned Factor) const override
Lower an interleaved store into a vstN intrinsic.
InstructionCost getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS) const override
getScalingFactorCost - Return the cost of the scaling used in addressing mode represented by AM.
bool isCheapToSpeculateCtlz() const override
Return true if it is cheap to speculate a call to intrinsic ctlz.
ARMTargetLowering(const TargetMachine &TM, const ARMSubtarget &STI)
SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const
PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND.
const char * getTargetNodeName(unsigned Opcode) const override
This method returns the name of a target specific DAG node.
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
Type * shouldConvertSplatType(ShuffleVectorInst *SVI) const override
Given a shuffle vector SVI representing a vector splat, return a new scalar type of size equal to SVI...
Value * emitLoadLinked(IRBuilderBase &Builder, Type *ValueTy, Value *Addr, AtomicOrdering Ord) const override
Perform a load-linked operation on Addr, returning a "Value *" with the corresponding pointee type.
Instruction * makeDMB(IRBuilderBase &Builder, ARM_MB::MemBOpt Domain) const
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is legal icmp immediate,...
const char * LowerXConstraint(EVT ConstraintVT) const override
Try to replace an X constraint, which matches anything, with another that has more specific requireme...
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes) const override
Returns the target specific optimal type for load and store operations as a result of memset,...
bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override
Return true if it is profitable for dag combiner to transform a floating point op of specified opcode...
TargetLoweringBase::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
CCAssignFn * CCAssignFnForCall(CallingConv::ID CC, bool isVarArg) const
bool isLegalInterleavedAccessType(unsigned Factor, FixedVectorType *VecTy, Align Alignment, const DataLayout &DL) const
Returns true if VecTy is a legal interleaved access type.
bool isVectorLoadExtDesirable(SDValue ExtVal) const override
Return true if folding a vector load into ExtVal (a sign, zero, or any extend node) is profitable.
bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx, unsigned &Cost) const override
Return true if the target can combine store(extractelement VectorTy, Idx).
bool lowerInterleavedLoad(LoadInst *LI, ArrayRef< ShuffleVectorInst * > Shuffles, ArrayRef< unsigned > Indices, unsigned Factor) const override
Lower an interleaved load into a vldN intrinsic.
bool useSoftFloat() const override
bool alignLoopsWithOptSize() const override
Should loops be aligned even when the function is marked OptSize (but not MinSize).
SDValue PerformCMOVToBFICombine(SDNode *N, SelectionDAG &DAG) const
bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override
Return true if a truncation from FromTy to ToTy is permitted when deciding whether a call is in tail ...
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
bool functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv, bool isVarArg, const DataLayout &DL) const override
Returns true if an argument of type Ty needs to be passed in a contiguous block of registers in calli...
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPostIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mo...
Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
This class represents an incoming formal argument to a Function.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
bool isFloatingPointOperation() const
bool hasFnAttribute(Attribute::AttrKind Kind) const
Equivalent to hasAttribute(AttributeList::FunctionIndex, Kind) but may be faster.
LLVM Basic Block Representation.
The address of a basic block.
static BranchProbability getZero()
A "pseudo-class" with methods for operating on BUILD_VECTORs.
CCState - This class holds information needed while lowering arguments and return values.
void getInRegsParamInfo(unsigned InRegsParamRecordIndex, unsigned &BeginReg, unsigned &EndReg) const
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
static bool resultsCompatible(CallingConv::ID CalleeCC, CallingConv::ID CallerCC, MachineFunction &MF, LLVMContext &C, const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn CalleeFn, CCAssignFn CallerFn)
Returns true if the results of the two calling conventions are compatible.
unsigned getNextStackOffset() const
getNextStackOffset - Return the next stack offset such that all stack slots satisfy their alignment r...
void rewindByValRegsInfo()
unsigned getInRegsParamsProcessed() const
void AnalyzeFormalArguments(const SmallVectorImpl< ISD::InputArg > &Ins, CCAssignFn Fn)
AnalyzeFormalArguments - Analyze an array of argument values, incorporating info about the formals in...
unsigned getInRegsParamsCount() const
CCValAssign - Represent assignment of one arg/retval to a location.
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
Value * getCalledOperand() const
AttributeList getAttributes() const
Return the parameter attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
static Constant * get(LLVMContext &Context, ArrayRef< ElementTy > Elts)
get() constructor - Return a constant with array type with an element count and element type matching...
const APFloat & getValueAPF() const
ConstantFP - Floating Point Values [float, double].
static Constant * get(Type *Ty, uint64_t V, bool IsSigned=false)
If Ty is a vector type, return a Constant with a splat of the given value.
uint64_t getZExtValue() const
const APInt & getAPIntValue() const
This is an important base class in LLVM.
A parsed version of the target data layout string in and methods for querying it.
bool isLittleEndian() const
Layout endianness...
Align getStackAlignment() const
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
Align getPreferredAlign(const GlobalVariable *GV) const
Returns the preferred alignment of the specified global.
StringRef getPrivateGlobalPrefix() const
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
Diagnostic information for unsupported feature in backend.
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
Class to represent fixed width SIMD vectors.
unsigned getNumElements() const
static FixedVectorType * get(Type *ElementType, unsigned NumElts)
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
const Function & getFunction() const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool hasStructRetAttr() const
Determine if the function returns a structure through first or second pointer argument.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
bool hasExternalWeakLinkage() const
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
bool isStrongDefinitionForLinker() const
Returns true if this global's definition will be the one chosen by the linker.
@ InternalLinkage
Rename collisions when linking (static functions).
unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
TargetInstrInfo overrides.
Common base class shared among various IRBuilders.
const std::string & getConstraintString() const
const std::string & getAsmString() const
int getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
bool isEmpty() const
Returns true if there are no itineraries.
bool hasAtomicStore() const
Return true if this atomic instruction stores to memory.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
const BasicBlock * getParent() const
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Class to represent integer types.
static bool LowerToByteSwap(CallInst *CI)
Try to replace a call instruction with a call to a bswap intrinsic.
This is an important class for using LLVM in a threaded context.
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
An instruction for reading from memory.
unsigned getPointerAddressSpace() const
Returns the address space of the pointer operand.
Value * getPointerOperand()
unsigned getAlignment() const
Return the alignment of the access that is being performed.
Align getAlign() const
Return the alignment of the access that is being performed.
This class is used to represent ISD::LOAD nodes.
Describe properties that are true of each instruction in the target description file.
unsigned getSchedClass() const
Return the scheduling class for this instruction.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
int getOperandConstraint(unsigned OpNum, MCOI::OperandConstraint Constraint) const
Returns the value of the specified operand constraint if it is present.
const MCOperandInfo * OpInfo
bool isOptionalDef() const
Set if this operand is an optional def.
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created ...
static MVT getFloatingPointVT(unsigned BitWidth)
static auto integer_fixedlen_vector_valuetypes()
bool isInteger() const
Return true if this is an integer or a vector integer type.
static MVT getVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
static MVT getVectorVT(MVT VT, unsigned NumElements)
MVT getVectorElementType() const
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
bool isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
bool canFallThrough()
Return true if the block can implicitly transfer control to the block after it by falling off the end...
succ_iterator succ_begin()
std::vector< MachineBasicBlock * >::iterator succ_iterator
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
MachineBasicBlock * getFallThrough()
Return the fallthrough block if the block can implicitly transfer control to the block after it by fa...
MachineBasicBlock * splitAt(MachineInstr &SplitInst, bool UpdateLiveIns=true, LiveIntervals *LIS=nullptr)
Split a basic block into 2 pieces at SplitPoint.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
void setAdjustsStack(bool V)
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
void computeMaxCallFrameSize(const MachineFunction &MF)
Computes the maximum size of a callframe and the AdjustsStack property.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool hasVAStart() const
Returns true if the function calls the llvm.va_start intrinsic.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
bool isFixedObjectIndex(int ObjectIdx) const
Returns true if the specified index corresponds to a fixed stack object.
int getFunctionContextIndex() const
Return the index for the function context object.
Properties which a MachineFunction may have at a given point in time.
MachineFunctionProperties & reset(Property P)
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *bb=nullptr)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
unsigned getFunctionNumber() const
getFunctionNumber - Return a unique ID for the current function.
MachineJumpTableInfo * getOrCreateJumpTableInfo(unsigned JTEntryKind)
getOrCreateJumpTableInfo - Get the JumpTableInfo for this function, if it does already exist,...
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
void push_back(MachineBasicBlock *MBB)
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
BasicBlockListType::iterator iterator
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
const MachineFunctionProperties & getProperties() const
Get the function properties.
bool hasCallSiteLandingPad(MCSymbol *Sym)
Return true if the landing pad Eh symbol has an associated call site.
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
void insert(iterator MBBI, MachineBasicBlock *MBB)
SmallVectorImpl< unsigned > & getCallSiteLandingPad(MCSymbol *Sym)
Get the call site indexes for a landing pad EH symbol.
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
bool definesRegister(Register Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr fully defines the specified register.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr reads the specified register.
const MachineOperand & getOperand(unsigned i) const
unsigned createJumpTableIndex(const std::vector< MachineBasicBlock * > &DestBBs)
createJumpTableIndex - Create a new jump table.
@ EK_Inline
EK_Inline - Jump table entries are emitted inline at their point of use.
A description of a memory reference used in the backend.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MONonTemporal
The memory access is non-temporal.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setReg(Register Reg)
Change the register this operand corresponds to.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
void setIsDef(bool Val=true)
Change a def to a use, or a use to a def.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
This class is used to represent an MLOAD node.
This class is used to represent an MSTORE node.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
This is an abstract virtual class for memory operations.
A Module instance is used to store all the information related to an LLVM module.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
virtual void print(raw_ostream &OS, const Module *M) const
print - Print out the internal state of the pass.
Class to represent pointers.
Type * getElementType() const
Wrapper class representing virtual and physical registers.
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
const DebugLoc & getDebugLoc() const
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
iterator_range< use_iterator > uses()
static bool hasPredecessorHelper(const SDNode *N, SmallPtrSetImpl< const SDNode * > &Visited, SmallVectorImpl< const SDNode * > &Worklist, unsigned int MaxSteps=0, bool TopologicalPrune=false)
Returns true if N is a predecessor of any node in Worklist.
unsigned getNumOperands() const
Return the number of values used by this operation.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool isUndef() const
Return true if the node's type is undefined.
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
const SDValue & getOperand(unsigned i) const
uint64_t getScalarValueSizeInBits() const
unsigned getResNo() const
get the index which selects a specific result in the SDNode
uint64_t getConstantOperandVal(unsigned i) const
unsigned getOpcode() const
unsigned getNumOperands() const
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS)
Helper function to make it easier to build Select's if you just have operands and don't want to check...
SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack.
const TargetSubtargetInfo & getSubtarget() const
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue UnrollVectorOp(SDNode *N, unsigned ResNE=0)
Utility function used by legalize and lowering to "unroll" a vector operation by splitting out the sc...
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=None, int Offset=0, unsigned TargetFlags=0)
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
std::pair< SDValue, SDValue > SplitVectorOperand(const SDNode *N, unsigned OpNo)
Split the node's operand with EXTRACT_SUBVECTOR and return the low/high part.
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
void setNodeMemRefs(MachineSDNode *N, ArrayRef< MachineMemOperand * > NewMemRefs)
Mutate the specified machine node's memory references to the provided list.
SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT)
Return the expression required to zero extend the Op value assuming it was the smaller SrcTy value.
const DataLayout & getDataLayout() const
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getAllOnesConstant(const SDLoc &DL, EVT VT, bool IsTarget=false, bool IsOpaque=false)
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
MaybeAlign InferPtrAlign(SDValue Ptr) const
Infer alignment of a load / store address.
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getRegister(unsigned Reg, EVT VT)
SDValue getTargetExtractSubreg(int SRIdx, const SDLoc &DL, EVT VT, SDValue Operand)
A convenience function for creating TargetInstrInfo::EXTRACT_SUBREG nodes.
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, uint64_t Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getRegisterMask(const uint32_t *RegMask)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getCondCode(ISD::CondCode Cond)
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDNode * getNodeIfExists(unsigned Opcode, SDVTList VTList, ArrayRef< SDValue > Ops, const SDNodeFlags Flags)
Get the specified node if it's already available, or else return NULL.
void addCallSiteInfo(const SDNode *CallNode, CallSiteInfoImpl &&CallInfo)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
SDValue getMaskedLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Base, SDValue Offset, SDValue Mask, SDValue Src0, EVT MemVT, MachineMemOperand *MMO, ISD::MemIndexedMode AM, ISD::LoadExtType, bool IsExpanding=false)
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
SDValue getLogicalNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a logical NOT operation as (XOR Val, BooleanOne).
This instruction constructs a fixed permutation of two input vectors.
static bool isIdentityMask(ArrayRef< int > Mask)
Return true if this shuffle mask chooses elements from exactly one source vector without lane crossin...
VectorType * getType() const
Overload to return most specific vector type.
static void getShuffleMask(const Constant *Mask, SmallVectorImpl< int > &Result)
Convert the input shuffle mask operand to a vector of integers.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
static bool isSplatMask(const int *Mask, EVT VT)
ArrayRef< int > getMask() const
typename SuperClass::iterator iterator
An instruction for storing to memory.
This class is used to represent ISD::STORE nodes.
StringRef - Represent a constant reference to a string, i.e.
const unsigned char * bytes_end() const
LLVM_NODISCARD size_t size() const
size - Get the string size.
const unsigned char * bytes_begin() const
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
void setTargetDAGCombine(ISD::NodeType NT)
Targets should invoke this method for each target independent node that they want to provide a custom...
virtual void finalizeLowering(MachineFunction &MF) const
Execute target specific actions to finalize target lowering.
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predict...
void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC)
Override the default CondCode to be used to test the result of the comparison libcall against zero.
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const
Get the CallingConv that should be used for the specified libcall.
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
void setIndexedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
void setMinStackArgumentAlignment(Align Alignment)
Set the minimum stack alignment of an argument.
const TargetMachine & getTargetMachine() const
void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC)
Set the CallingConv that should be used for the specified libcall.
void setIndexedMaskedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed masked load does or does not work with the specified type and ind...
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
void setPrefLoopAlignment(Align Alignment)
Set the target's preferred loop alignment.
virtual Function * getSSPStackGuardCheck(const Module &M) const
If the target has a standard stack protection check function that performs validation and error handl...
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL, bool LegalTypes=true) const
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
virtual unsigned getMaxSupportedInterleaveFactor() const
Get the maximum supported factor for interleaved memory accesses.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
void setIndexedMaskedStoreAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed masked store does or does not work with the specified type and in...
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
virtual std::pair< const TargetRegisterClass *, uint8_t > findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const
Return the largest legal super-reg register class of the register class for the specified type and it...
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const
Return how we should legalize values of this type, either it is already legal (return 'Legal') or we ...
const char * getLibcallName(RTLIB::Libcall Call) const
Get the libcall routine name for the specified libcall.
std::vector< ArgListEntry > ArgListTy
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
virtual void insertSSPDeclarations(Module &M) const
Inserts necessary declarations for SSP (stack protection) purpose.
void setIndexedLoadAction(unsigned IdxMode, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
bool SimplifyDemandedVectorElts(SDValue Op, const APInt &DemandedEltMask, APInt &KnownUndef, APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Vector Op.
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, const SDLoc &DL, const SDValue OldLHS, const SDValue OldRHS) const
Soften the operands of a comparison.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
bool parametersInCSRMatch(const MachineRegisterInfo &MRI, const uint32_t *CallerPreservedMask, const SmallVectorImpl< CCValAssign > &ArgLocs, const SmallVectorImpl< SDValue > &OutVals) const
Check whether parameters to a call that are passed in callee saved registers are the same as from the...
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
bool isPositionIndependent() const
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
SDValue buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0, SDValue N1, MutableArrayRef< int > Mask, SelectionDAG &DAG) const
Tries to build a legal vector shuffle using the provided parameters or equivalent variations.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0, bool AssumeSingleUse=false) const
Look at Op.
virtual bool SimplifyDemandedBitsForTargetNode(SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth=0) const
Attempt to simplify any target nodes based on the demanded bits/elts, returning true on success.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
bool expandABS(SDNode *N, SDValue &Result, SelectionDAG &DAG, bool IsNegative=false) const
Expand ABS nodes.
bool isConstTrueVal(const SDNode *N) const
Return if the N is a constant or constant vector equal to the true value from getBooleanContents().
Primary interface to the complete machine description for the target machine.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
const Triple & getTargetTriple() const
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
virtual const TargetSubtargetInfo * getSubtargetImpl(const Function &) const
Virtual method implemented by subclasses that returns a reference to that target's TargetSubtargetInf...
unsigned EnableFastISel
EnableFastISel - This flag enables fast-path instruction selection which trades away generated code q...
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
ObjectFormatType getObjectFormat() const
getFormat - Get the object format for this triple.
bool isOSMSVCRT() const
Is this a "Windows" OS targeting a "MSVCRT.dll" environment.
bool isOSVersionLT(unsigned Major, unsigned Minor=0, unsigned Micro=0) const
isOSVersionLT - Helper function for doing comparisons against version numbers included in the target ...
bool isWindowsMSVCEnvironment() const
Checks if the environment could be MSVC.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
ScalarTy getFixedSize() const
static TypeSize Fixed(ScalarTy MinVal)
The instances of the Type class are immutable: once they are created, they are never changed.
static IntegerType * getInt64Ty(LLVMContext &C)
bool isVectorTy() const
True if this is an instance of VectorType.
static IntegerType * getInt32Ty(LLVMContext &C)
bool isPointerTy() const
True if this is an instance of PointerType.
static Type * getVoidTy(LLVMContext &C)
static IntegerType * getInt8Ty(LLVMContext &C)
static PointerType * getInt8PtrTy(LLVMContext &C, unsigned AS=0)
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
static IntegerType * getInt16Ty(LLVMContext &C)
bool isHalfTy() const
Return true if this is 'half', a 16-bit IEEE fp type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
A Use represents the edge between a Value definition and its users.
const Use & getOperandUse(unsigned i) const
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
Base class of all SIMD vector types.
Type * getElementType() const
Implementation for an ilist node.
ilist_node_impl()=default
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
static CondCodes getOppositeCondition(CondCodes CC)
@ SECREL
Section Relative (Windows TLS).
@ SBREL
Static Base Relative.
@ GOTTPOFF
Global Offset Table, Thread Pointer Offset.
@ TPOFF
Thread Pointer Offset.
TOF
Target Operand Flag enum.
@ MO_NONLAZY
MO_NONLAZY - This is an independent flag, on a symbol operand "FOO" it represents a symbol which,...
@ MO_SBREL
MO_SBREL - On a symbol operand, this represents a static base relative relocation.
@ MO_DLLIMPORT
MO_DLLIMPORT - On a symbol operand, this represents that the reference to the symbol is for an import...
@ MO_GOT
MO_GOT - On a symbol operand, this represents a GOT relative relocation.
@ MO_COFFSTUB
MO_COFFSTUB - On a symbol operand "FOO", this indicates that the reference is actually to the "....
static ShiftOpc getShiftOpcForNode(unsigned Opcode)
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into an shifter_operand immed...
int getFP32Imm(const APInt &Imm)
getFP32Imm - Return an 8-bit floating-point version of the 32-bit floating-point value.
uint64_t decodeVMOVModImm(unsigned ModImm, unsigned &EltBits)
decodeVMOVModImm - Decode a NEON/MVE modified immediate value into the element value and the element ...
unsigned getAM2Offset(unsigned AM2Opc)
bool isThumbImmShiftedVal(unsigned V)
isThumbImmShiftedVal - Return true if the specified value can be obtained by left shifting a 8-bit im...
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
unsigned createVMOVModImm(unsigned OpCmode, unsigned Val)
int getFP64Imm(const APInt &Imm)
getFP64Imm - Return an 8-bit floating-point version of the 64-bit floating-point value.
int getFP16Imm(const APInt &Imm)
getFP16Imm - Return an 8-bit floating-point version of the 16-bit floating-point value.
unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm)
int getFP32FP16Imm(const APInt &Imm)
If this is a FP16Imm encoded as a fp32 value, return the 8-bit encoding for it.
AddrOpc getAM2Op(unsigned AM2Opc)
bool isBitFieldInvertedMask(unsigned v)
const unsigned RoundingBitsPos
FastISel * createFastISel(FunctionLoweringInfo &funcInfo, const TargetLibraryInfo *libInfo)
std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ ARM_APCS
ARM_APCS - ARM Procedure Calling Standard calling convention (obsolete, but still used on some target...
@ CFGuard_Check
Special calling convention on Windows for calling the Control Guard Check ICall function.
@ ARM_AAPCS
ARM_AAPCS - ARM Architecture Procedure Calling Standard calling convention (aka EABI).
@ Fast
Fast - This calling convention attempts to make calls as fast as possible (e.g.
@ Tail
Tail - This calling convention attempts to make calls as fast as possible while guaranteeing that tail...
@ SwiftTail
SwiftTail - This follows the Swift calling convention in how arguments are passed but guarantees tail...
@ ARM_AAPCS_VFP
ARM_AAPCS_VFP - Same as ARM_AAPCS, but uses hard floating point ABI.
@ C
C - The default llvm calling convention, compatible with C.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ FLT_ROUNDS_
Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest, ties to even 2 Round to ...
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ FGETSIGN
INT = FGETSIGN(FP) - Return the sign bit of the specified floating point value as an integer 0/1 valu...
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ EH_SJLJ_SETUP_DISPATCH
OUTCHAIN = EH_SJLJ_SETUP_DISPATCH(INCHAIN) The target initializes the dispatch table here.
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ VECREDUCE_FMAX
FMIN/FMAX nodes can have flags, for NaN/NoNaN variants.
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ ATOMIC_FENCE
OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope) This corresponds to the fence instruction.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ BUILTIN_OP_END
BUILTIN_OP_END - This must be the last enum value in this list.
@ SET_ROUNDING
Set rounding mode.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ BR
Control flow instructions. These all have token chains.
@ VECREDUCE_FADD
These reductions have relaxed evaluation order semantics, and have a single vector operand.
@ CTTZ_ZERO_UNDEF
Bit counting operators with an undefined result for zero inputs.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ ADDCARRY
Carry-using nodes for multiple precision addition and subtraction.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ SETCCCARRY
Like SetCC, ops #0 and #1 are the LHS and RHS operands to compare, but op #2 is a boolean indicating ...
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ SSUBO
Same for subtraction.
@ BR_JT
BR_JT - Jumptable branch.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ UNDEF
UNDEF - An undefined node.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ CopyFromReg
CopyFromReg - This node indicates that the input value is a virtual or physical register that is defi...
@ SADDO
RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
@ VECREDUCE_ADD
Integer reductions may have a result type larger than the vector element type.
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ READ_REGISTER
READ_REGISTER, WRITE_REGISTER - This node represents llvm.register on the DAG, which implements the n...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ CopyToReg
CopyToReg - This node has three operands: a chain, a register number to set to this value,...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ DEBUGTRAP
DEBUGTRAP - Trap intended to get the attention of a debugger.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ SMULO
Same for multiplication.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FMINIMUM
FMINIMUM/FMAXIMUM - NaN-propagating minimum/maximum that also treat -0.0 as less than 0....
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
CondCode getSetCCInverse(CondCode Operation, EVT Type)
Return the operation corresponding to !(X op Y), where 'op' is a valid SetCC operation.
bool isEXTLoad(const SDNode *N)
Returns true if the specified node is a EXTLOAD.
CondCode getSetCCSwappedOperands(CondCode Operation)
Return the operation corresponding to (Y op X) when given the operation for (X op Y).
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool isSignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs a signed comparison when used with integer o...
bool isConstantSplatVector(const SDNode *N, APInt &SplatValue)
Node predicates.
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
bool isSEXTLoad(const SDNode *N)
Returns true if the specified node is a SEXTLOAD.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
static const int LAST_INDEXED_MODE
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=None)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
Flag
These should be considered private to the implementation of the MCInstrDesc class.
bool match(Val *V, const Pattern &P)
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
match_combine_or< CastClass_match< OpTy, Instruction::ZExt >, CastClass_match< OpTy, Instruction::SExt > > m_ZExtOrSExt(const OpTy &Op)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
auto m_Undef()
Match an arbitrary undef constant.
ThreeOps_match< Val_t, Elt_t, Idx_t, Instruction::InsertElement > m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx)
Matches InsertElementInst.
Libcall getSINTTOFP(EVT OpVT, EVT RetVT)
getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getUINTTOFP(EVT OpVT, EVT RetVT)
getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
Libcall getFPTOUINT(EVT OpVT, EVT RetVT)
getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getFPEXT(EVT OpVT, EVT RetVT)
getFPEXT - Return the FPEXT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getFPROUND(EVT OpVT, EVT RetVT)
getFPROUND - Return the FPROUND_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
initializer< Ty > init(const Ty &Val)
----------------------- PointerInfo -------------------------------------
void dump(const SparseBitVector< ElementSize > &LHS, raw_ostream &out)
auto find(R &&Range, const T &Val)
Provide wrappers to std::find which take ranges instead of having to pass begin/end explicitly.
constexpr bool isUInt< 8 >(uint64_t x)
bool CC_ARM_APCS_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool RetCC_ARM_AAPCS_VFP(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
bool HasLowerConstantMaterializationCost(unsigned Val1, unsigned Val2, const ARMSubtarget *Subtarget, bool ForCodesize=false)
Returns true if Val1 has a lower Constant Materialization Cost than Val2.
bool operator==(uint64_t V1, const APInt &V2)
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
bool isUIntN(unsigned N, uint64_t x)
Checks if an unsigned integer fits into the given (dynamic) bit width.
bool isStrongerThanMonotonic(AtomicOrdering AO)
constexpr bool isMask_32(uint32_t Value)
Return true if the argument is a non-empty sequence of ones starting at the least significant bit wit...
bool FastCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
void append_range(Container &C, Range &&R)
Wrapper function to append a range to a container.
bool RetCC_ARM_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
Value * concatenateVectors(IRBuilderBase &Builder, ArrayRef< Value * > Vecs)
Concatenate a list of vectors.
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
void shuffle(Iterator first, Iterator last, RNG &&g)
static std::array< MachineOperand, 2 > predOps(ARMCC::CondCodes Pred, unsigned PredReg=0)
Get the operands corresponding to the given Pred value.
bool CC_ARM_AAPCS_VFP(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
constexpr bool isShiftedMask_32(uint32_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (32 bit ver...
bool CC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool isReleaseOrStronger(AtomicOrdering AO)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
bool isBitwiseNot(SDValue V, bool AllowUndefs=false)
Returns true if V is a bitwise not operation.
bool RetCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
unsigned countLeadingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the most significant bit to the least stopping at the first 1.
bool CC_ARM_Win32_CFGuard_Check(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
constexpr size_t array_lengthof(T(&)[N])
Find the length of an array.
unsigned countTrailingZeros(T Val, ZeroBehavior ZB=ZB_Width)
Count number of 0's from the least significant bit to the most stopping at the first 1.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
LLVM_ATTRIBUTE_NORETURN void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
unsigned countTrailingOnes(T Value, ZeroBehavior ZB=ZB_Width)
Count the number of ones from the least significant bit to the first zero bit.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ Mod
The access may modify the value stored in memory.
detail::enumerator< R > enumerate(R &&TheRange)
Given an input range, returns a new range whose values are the pairs (A,B) such that A is the 0-based ...
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
@ Mul
Product of integers.
@ And
Bitwise or logical AND of integers.
bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
bool RetFastCC_ARM_APCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool CC_ARM_AAPCS(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
constexpr uint64_t MinAlign(uint64_t A, uint64_t B)
A and B are either alignments or offsets.
std::enable_if_t<!is_simple_type< Y >::value, typename cast_retty< X, const Y >::ret_type > cast(const Y &Val)
ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
bool isAcquireOrStronger(AtomicOrdering AO)
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr unsigned BitWidth
static MachineOperand t1CondCodeOp(bool isDead=false)
Get the operand corresponding to the conditional code result for Thumb1.
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
bool isOneConstant(SDValue V)
Returns true if V is a constant integer one.
static MachineOperand condCodeOp(unsigned CCReg=0)
Get the operand corresponding to the conditional code result.
bool isVREVMask(ArrayRef< int > M, EVT VT, unsigned BlockSize)
isVREVMask - Check if a vector shuffle corresponds to a VREV instruction with the specified blocksize...
unsigned gettBLXrOpcode(const MachineFunction &MF)
llvm::SmallVector< int, 16 > createSequentialMask(unsigned Start, unsigned NumInts, unsigned NumUndefs)
Create a sequential shuffle mask.
unsigned convertAddSubFlagsOpcode(unsigned OldOpc)
Map pseudo instructions that imply an 'S' bit onto real opcodes.
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
ElementCount getVectorElementCount() const
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
bool isPow2VectorType() const
Returns true if the given vector is a power of 2.
static EVT getEVT(Type *Ty, bool HandleUnknown=false)
Return the value type corresponding to the specified type.
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
bool is128BitVector() const
Return true if this is a 128-bit vector type.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
static EVT getFloatingPointVT(unsigned BitWidth)
Returns the EVT that represents a floating-point type with the given number of bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
bool is64BitVector() const
Return true if this is a 64-bit vector type.
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
static KnownBits commonBits(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits common to LHS and RHS.
bool isUnknown() const
Returns true if we don't know any bits.
static KnownBits mul(const KnownBits &LHS, const KnownBits &RHS)
Compute known bits resulting from multiplying LHS and RHS.
unsigned getBitWidth() const
Get the bit width of this value.
KnownBits zext(unsigned BitWidth) const
Return known bits for a zero extension of the value we're tracking.
void resetAll()
Resets the known state of all bits.
KnownBits sext(unsigned BitWidth) const
Return known bits for a sign extension of the value we're tracking.
static KnownBits computeForAddSub(bool Add, bool NSW, const KnownBits &LHS, KnownBits RHS)
Compute known bits resulting from adding LHS and RHS.
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getJumpTable(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a jump table entry.
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg If BaseGV is null...
This contains information for each constraint that we are lowering.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setInRegister(bool Value=true)
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
CallLoweringInfo & setDiscardResult(bool Value=true)
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setSExtResult(bool Value=true)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
CallLoweringInfo & setChain(SDValue InChain)
A convenience struct that encapsulates a DAG, and two SDValues for returning information from TargetL...